[PATCH v5 net-next 4/5] net: fec: add change_mtu to support dynamic buffer allocation

Shenwei Wang posted 5 patches 4 weeks ago
There is a newer version of this series
[PATCH v5 net-next 4/5] net: fec: add change_mtu to support dynamic buffer allocation
Posted by Shenwei Wang 4 weeks ago
Add a fec_change_mtu() handler to recalculate the pagepool_order based
on the new_mtu value. Update the rx_frame_size accordingly if the
pagepool_order is changed.

If the interface is running, stop RX/TX and recreate the pagepool
with the new configuration.

Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
---
 drivers/net/ethernet/freescale/fec.h      |  5 +-
 drivers/net/ethernet/freescale/fec_main.c | 57 ++++++++++++++++++++++-
 2 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f1032a11aa76..0127cfa5529f 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -348,10 +348,11 @@ struct bufdesc_ex {
  * the skbuffer directly.
  */
 
+#define FEC_DRV_RESERVE_SPACE (XDP_PACKET_HEADROOM + \
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define FEC_ENET_XDP_HEADROOM	(XDP_PACKET_HEADROOM)
 #define FEC_ENET_RX_PAGES	256
-#define FEC_ENET_RX_FRSIZE	(PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
-		- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_RX_FRSIZE	(PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
 #define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
 #define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
 #define FEC_ENET_TX_FRSIZE	2048
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index cf5118838f9c..295420d2b71b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -470,14 +470,14 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	struct page_pool_params pp_params = {
-		.order = 0,
+		.order = fep->pagepool_order,
 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = size,
 		.nid = dev_to_node(&fep->pdev->dev),
 		.dev = &fep->pdev->dev,
 		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
 		.offset = FEC_ENET_XDP_HEADROOM,
-		.max_len = FEC_ENET_RX_FRSIZE,
+		.max_len = fep->rx_frame_size,
 	};
 	int err;
 
@@ -4020,6 +4020,58 @@ static int fec_hwtstamp_set(struct net_device *ndev,
 	return fec_ptp_set(ndev, config, extack);
 }
 
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int old_mtu, old_order, old_size, order, done;
+	int ret = 0;
+
+	order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN + FEC_DRV_RESERVE_SPACE);
+	old_order = fep->pagepool_order;
+	old_size = fep->rx_frame_size;
+	old_mtu = READ_ONCE(ndev->mtu);
+	fep->pagepool_order = order;
+	fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
+
+	if (!netif_running(ndev)) {
+		WRITE_ONCE(ndev->mtu, new_mtu);
+		return 0;
+	}
+
+	/* Stop TX/RX to update MAX_FL based on the new_mtu
+	 * and free/re-allocate the buffers if needs.
+	 */
+	napi_disable(&fep->napi);
+	netif_tx_disable(ndev);
+	read_poll_timeout(fec_enet_rx_napi, done, (done == 0),
+			  10, 1000, false, &fep->napi, 10);
+	fec_stop(ndev);
+
+	WRITE_ONCE(ndev->mtu, new_mtu);
+
+	if (order != old_order) {
+		fec_enet_free_buffers(ndev);
+
+		/* Create the pagepool based on the new mtu.
+		 * Revert to the original settings if buffer
+		 * allocation fails.
+		 */
+		if (fec_enet_alloc_buffers(ndev) < 0) {
+			fep->pagepool_order = old_order;
+			fep->rx_frame_size = old_size;
+			WRITE_ONCE(ndev->mtu, old_mtu);
+			fec_enet_alloc_buffers(ndev);
+			ret = -ENOMEM;
+		}
+	}
+
+	fec_restart(ndev);
+	napi_enable(&fep->napi);
+	netif_tx_start_all_queues(ndev);
+
+	return ret;
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
@@ -4029,6 +4081,7 @@ static const struct net_device_ops fec_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= fec_timeout,
 	.ndo_set_mac_address	= fec_set_mac_address,
+	.ndo_change_mtu		= fec_change_mtu,
 	.ndo_eth_ioctl		= phy_do_ioctl_running,
 	.ndo_set_features	= fec_set_features,
 	.ndo_bpf		= fec_enet_bpf,
-- 
2.43.0
Re: [PATCH v5 net-next 4/5] net: fec: add change_mtu to support dynamic buffer allocation
Posted by Jakub Kicinski 3 weeks, 6 days ago
On Thu,  4 Sep 2025 15:35:01 -0500 Shenwei Wang wrote:
> +		if (fec_enet_alloc_buffers(ndev) < 0) {
> +			fep->pagepool_order = old_order;
> +			fep->rx_frame_size = old_size;
> +			WRITE_ONCE(ndev->mtu, old_mtu);
> +			fec_enet_alloc_buffers(ndev);

And how do you know that it will succeed now?
You can't leave the device in-operational due to reconfig request when
system is under memory pressure. You need to save the previous buffers
so that you can restore them without having to allocate.

> +			ret = -ENOMEM;
> +		}
Re: [PATCH v5 net-next 4/5] net: fec: add change_mtu to support dynamic buffer allocation
Posted by Frank Li 4 weeks ago
On Thu, Sep 04, 2025 at 03:35:01PM -0500, Shenwei Wang wrote:
> Add a fec_change_mtu() handler to recalculate the pagepool_order based
> on the new_mtu value. It will update the rx_frame_size accordingly if
> the pagepool_order is changed.

Remove "It will".

>
> If the interface is running, it stops RX/TX, and recreate the pagepool
> with the new configuration.

If the interface is running, stop RX/TX and ...

>
> Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
> ---
>  drivers/net/ethernet/freescale/fec.h      |  5 +-
>  drivers/net/ethernet/freescale/fec_main.c | 57 ++++++++++++++++++++++-
>  2 files changed, 58 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
> index f1032a11aa76..0127cfa5529f 100644
> --- a/drivers/net/ethernet/freescale/fec.h
> +++ b/drivers/net/ethernet/freescale/fec.h
> @@ -348,10 +348,11 @@ struct bufdesc_ex {
>   * the skbuffer directly.
>   */
>
...
>
> +static int fec_change_mtu(struct net_device *ndev, int new_mtu)
> +{
> +	struct fec_enet_private *fep = netdev_priv(ndev);
> +	int old_mtu, old_order, old_size, order, done;
> +	int ret = 0;
> +
> +	order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN + FEC_DRV_RESERVE_SPACE);
> +	old_order = fep->pagepool_order;
> +	old_size = fep->rx_frame_size;
> +	old_mtu = READ_ONCE(ndev->mtu);
> +	fep->pagepool_order = order;
> +	fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
> +
> +	if (!netif_running(ndev)) {
> +		WRITE_ONCE(ndev->mtu, new_mtu);
> +		return 0;
> +	}
> +
> +	/* Stop TX/RX to update MAX_FL based on the new_mtu
> +	 * and free/re-allocate the buffers if needs.
> +	 */
> +	napi_disable(&fep->napi);
> +	netif_tx_disable(ndev);
> +	read_poll_timeout(fec_enet_rx_napi, done, (done == 0),
> +			  10, 1000, false, &fep->napi, 10);
> +	fec_stop(ndev);

I think you need to move the fep->pagepool_order and fep->rx_frame_size
updates to here, in case updating rx_frame_size impacts the running queue.

Frank
> +
> +	WRITE_ONCE(ndev->mtu, new_mtu);
> +
> +	if (order != old_order) {
> +		fec_enet_free_buffers(ndev);
> +
> +		/* Create the pagepool based on the new mtu.
> +		 * Revert to the original settings if buffer
> +		 * allocation fails.
> +		 */
> +		if (fec_enet_alloc_buffers(ndev) < 0) {
> +			fep->pagepool_order = old_order;
> +			fep->rx_frame_size = old_size;
> +			WRITE_ONCE(ndev->mtu, old_mtu);
> +			fec_enet_alloc_buffers(ndev);
> +			ret = -ENOMEM;
> +		}
> +	}
> +
> +	fec_restart(ndev);
> +	napi_enable(&fep->napi);
> +	netif_tx_start_all_queues(ndev);
> +
> +	return ret;
> +}
> +
>  static const struct net_device_ops fec_netdev_ops = {
>  	.ndo_open		= fec_enet_open,
>  	.ndo_stop		= fec_enet_close,
> @@ -4029,6 +4081,7 @@ static const struct net_device_ops fec_netdev_ops = {
>  	.ndo_validate_addr	= eth_validate_addr,
>  	.ndo_tx_timeout		= fec_timeout,
>  	.ndo_set_mac_address	= fec_set_mac_address,
> +	.ndo_change_mtu		= fec_change_mtu,
>  	.ndo_eth_ioctl		= phy_do_ioctl_running,
>  	.ndo_set_features	= fec_set_features,
>  	.ndo_bpf		= fec_enet_bpf,
> --
> 2.43.0
>