Add a new pagepool_order member to the fec_enet_private struct to
allow the page size to be configured dynamically per instance. This
change removes the hardcoded page size assumptions.
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Reviewed-by: Wei Fang <wei.fang@nxp.com>
Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
---
drivers/net/ethernet/freescale/fec.h | 1 +
drivers/net/ethernet/freescale/fec_main.c | 5 +++--
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2969088dda09..47317346b2f3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -620,6 +620,7 @@ struct fec_enet_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
unsigned int max_buf_size;
+ unsigned int pagepool_order;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5a21000aca59..f046d32a62fb 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1780,7 +1780,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_init_buff(&xdp, (PAGE_SIZE << fep->pagepool_order), &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1850,7 +1850,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page), PAGE_SIZE);
+ skb = build_skb(page_address(page), (PAGE_SIZE << fep->pagepool_order));
if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
@@ -4559,6 +4559,7 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
+ fep->pagepool_order = 0;
fep->max_buf_size = PKT_MAXBUF_SIZE;
ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
--
2.43.0
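[Editor's note: this patch only introduces and zero-initialises the new
field; the commit message implies a follow-up will consume it when sizing
the Rx page pool. A minimal sketch of such a use, assuming the pool is
still built via page_pool_create() as the driver does today - the helper
name and parameter values below are illustrative, not taken from this
series:

static struct page_pool *fec_create_pool_sketch(struct fec_enet_private *fep,
						int pool_size)
{
	/* The pool hands out buffers of PAGE_SIZE << pagepool_order, so
	 * the .order field is what makes the buffer size per-instance.
	 */
	struct page_pool_params pp_params = {
		.order		= fep->pagepool_order,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= pool_size,
		.nid		= dev_to_node(&fep->pdev->dev),
		.dev		= &fep->pdev->dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= XDP_PACKET_HEADROOM,
		.max_len	= (PAGE_SIZE << fep->pagepool_order) -
				  XDP_PACKET_HEADROOM,
	};

	return page_pool_create(&pp_params);
}

With .order derived from pagepool_order, the pool's page allocations stay
consistent with the PAGE_SIZE << fep->pagepool_order buffer sizes used in
fec_enet_rx_queue() above.]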
On Thu, 4 Sep 2025 15:34:59 -0500 Shenwei Wang wrote:
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index 5a21000aca59..f046d32a62fb 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -1780,7 +1780,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
> * These get messed up if we get called due to a busy condition.
> */
> bdp = rxq->bd.cur;
> - xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
> + xdp_init_buff(&xdp, (PAGE_SIZE << fep->pagepool_order), &rxq->xdp_rxq);
please drop the unnecessary parentheses, and
> while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
>
> @@ -1850,7 +1850,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
> * include that when passing upstream as it messes up
> * bridging applications.
> */
> - skb = build_skb(page_address(page), PAGE_SIZE);
> + skb = build_skb(page_address(page), (PAGE_SIZE << fep->pagepool_order));
wrap the lines at 80 chars
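
[Editor's note: with both comments applied, the two hunks would look
roughly as follows - an illustrative sketch of the requested respin, not
a quote from a posted v2:

	xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);

	skb = build_skb(page_address(page),
			PAGE_SIZE << fep->pagepool_order);
]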
On Thu, Sep 04, 2025 at 03:34:59PM -0500, Shenwei Wang wrote:
> Add a new pagepool_order member to the fec_enet_private struct to
> allow the page size to be configured dynamically per instance. This
> change removes the hardcoded page size assumptions.
>
> Reviewed-by: Andrew Lunn <andrew@lunn.ch>
> Reviewed-by: Wei Fang <wei.fang@nxp.com>
> Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
> ---
Reviewed-by: Frank Li <Frank.Li@nxp.com>
> drivers/net/ethernet/freescale/fec.h | 1 +
> drivers/net/ethernet/freescale/fec_main.c | 5 +++--
> 2 files changed, 4 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
> index 2969088dda09..47317346b2f3 100644
> --- a/drivers/net/ethernet/freescale/fec.h
> +++ b/drivers/net/ethernet/freescale/fec.h
> @@ -620,6 +620,7 @@ struct fec_enet_private {
> unsigned int total_tx_ring_size;
> unsigned int total_rx_ring_size;
> unsigned int max_buf_size;
> + unsigned int pagepool_order;
>
> struct platform_device *pdev;
>
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index 5a21000aca59..f046d32a62fb 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -1780,7 +1780,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
> * These get messed up if we get called due to a busy condition.
> */
> bdp = rxq->bd.cur;
> - xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
> + xdp_init_buff(&xdp, (PAGE_SIZE << fep->pagepool_order), &rxq->xdp_rxq);
>
> while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
>
> @@ -1850,7 +1850,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
> * include that when passing upstream as it messes up
> * bridging applications.
> */
> - skb = build_skb(page_address(page), PAGE_SIZE);
> + skb = build_skb(page_address(page), (PAGE_SIZE << fep->pagepool_order));
> if (unlikely(!skb)) {
> page_pool_recycle_direct(rxq->page_pool, page);
> ndev->stats.rx_dropped++;
> @@ -4559,6 +4559,7 @@ fec_probe(struct platform_device *pdev)
> fec_enet_clk_enable(ndev, false);
> pinctrl_pm_select_sleep_state(&pdev->dev);
>
> + fep->pagepool_order = 0;
> fep->max_buf_size = PKT_MAXBUF_SIZE;
> ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
>
> --
> 2.43.0
>