Implement the XSK NDOs (setup and wakeup) and create XSK
Rx and Tx queues. xsk_qid stores the queue id that has been
registered for zero-copy AF_XDP on a given port and is used
to look up the UMEM pointer for that queue. Based on xsk_qid
and the xsk_pool (UMEM), the driver runs in either copy or
zero-copy mode. In copy mode xsk_qid is invalid; it is set to
a valid queue id when zero copy is enabled. To enable zero
copy, the Rx queues are destroyed, i.e. the descriptors pushed
to the free queue (fq) and completion queue (cq) are freed so
that they can be remapped to XDP buffers from the UMEM.
Signed-off-by: Meghana Malladi <m-malladi@ti.com>
---
drivers/net/ethernet/ti/icssg/icssg_common.c | 2 +-
drivers/net/ethernet/ti/icssg/icssg_prueth.c | 138 +++++++++++++++++++
drivers/net/ethernet/ti/icssg/icssg_prueth.h | 10 ++
3 files changed, 149 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 94021751b6b7..cc52cff70d7e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -754,7 +754,7 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
}
pa = page_address(page);
- if (emac->xdp_prog) {
+ if (prueth_xdp_is_enabled(emac)) {
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index fa2d0bd329ab..fcdc1d0a004b 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -47,6 +47,9 @@
NETIF_F_HW_HSR_TAG_INS | \
NETIF_F_HW_HSR_TAG_RM)
+#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
+ DMA_ATTR_WEAK_ORDERING)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -735,6 +738,20 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
return 0;
}
+static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != queue_id) {
+ rx_chn->xsk_pool = NULL;
+ tx_chn->xsk_pool = NULL;
+ } else {
+ rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ }
+}
+
static void prueth_destroy_txq(struct prueth_emac *emac)
{
int ret, i;
@@ -875,6 +892,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
+ emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -1200,6 +1218,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
return 0;
}
+static int prueth_xsk_pool_enable(struct prueth_emac *emac,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 frame_size;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < PRUETH_MAX_PKT_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
+ return ret;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ emac->xsk_qid = queue_id;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (emac->xsk_qid != queue_id) {
+ netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
+ return -EINVAL;
+ }
+
+ pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ if (!pool) {
+ netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ emac->xsk_qid = -EINVAL;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* emac_ndo_bpf - implements ndo_bpf for icssg_prueth
* @ndev: network adapter device
@@ -1214,11 +1335,27 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return emac_xdp_setup(emac, bpf);
+ case XDP_SETUP_XSK_POOL:
+ return bpf->xsk.pool ?
+ prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
+ prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
default:
return -EINVAL;
}
}
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
+ netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1237,6 +1374,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_xdp_xmit = emac_xdp_xmit,
.ndo_hwtstamp_get = icssg_ndo_get_ts_config,
.ndo_hwtstamp_set = icssg_ndo_set_ts_config,
+ .ndo_xsk_wakeup = prueth_xsk_wakeup,
};
static int prueth_netdev_init(struct prueth *prueth,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 5cc90b66035a..a5e3774b0388 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -38,6 +38,8 @@
#include <net/devlink.h>
#include <net/xdp.h>
#include <net/page_pool/helpers.h>
+#include <net/xsk_buff_pool.h>
+#include <net/xdp_sock_drv.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -126,6 +128,7 @@ struct prueth_tx_chn {
char name[32];
struct hrtimer tx_hrtimer;
unsigned long tx_pace_timeout_ns;
+ struct xsk_buff_pool *xsk_pool;
};
struct prueth_rx_chn {
@@ -138,6 +141,7 @@ struct prueth_rx_chn {
char name[32];
struct page_pool *pg_pool;
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
};
enum prueth_swdata_type {
@@ -241,6 +245,7 @@ struct prueth_emac {
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
struct bpf_prog *xdp_prog;
struct xdp_attachment_info xdpi;
+ int xsk_qid;
};
/* The buf includes headroom compatible with both skb and xdpf */
@@ -503,5 +508,10 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
unsigned int q_idx);
void prueth_rx_cleanup(void *data, dma_addr_t desc_dma);
void prueth_tx_cleanup(void *data, dma_addr_t desc_dma);
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
+static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac)
+{
+ return !!READ_ONCE(emac->xdp_prog);
+}
#endif /* __NET_TI_ICSSG_PRUETH_H */
--
2.43.0
On 10/23/25 11:39 AM, Meghana Malladi wrote:
> @@ -1200,6 +1218,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
> return 0;
> }
>
> +static int prueth_xsk_pool_enable(struct prueth_emac *emac,
> + struct xsk_buff_pool *pool, u16 queue_id)
> +{
> + struct prueth_rx_chn *rx_chn = &emac->rx_chns;
> + u32 frame_size;
> + int ret;
> +
> + if (queue_id >= PRUETH_MAX_RX_FLOWS ||
> + queue_id >= emac->tx_ch_num) {
> + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
> + return -EINVAL;
> + }
> +
> + frame_size = xsk_pool_get_rx_frame_size(pool);
> + if (frame_size < PRUETH_MAX_PKT_SIZE)
> + return -EOPNOTSUPP;
> +
> + ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
> + if (ret) {
> + netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
> + return ret;
> + }
> +
> + if (netif_running(emac->ndev)) {
> + /* stop packets from wire for graceful teardown */
> + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
> + if (ret)
> + return ret;
> + prueth_destroy_rxq(emac);
> + }
> +
> + emac->xsk_qid = queue_id;
> + prueth_set_xsk_pool(emac, queue_id);
> +
> + if (netif_running(emac->ndev)) {
> + ret = prueth_create_rxq(emac);
It looks like this falls short of Jakub's request on v2:
https://lore.kernel.org/netdev/20250903174847.5d8d1c9f@kernel.org/
about not freeing the rx queue for reconfig.
I think you should:
- stop the H/W from processing incoming packets,
- spool all the pending packets
- attach/detach the xsk_pool
- refill the ring
- re-enable the H/W
/P
Hi Paolo,
On 10/28/25 16:27, Paolo Abeni wrote:
>
> On 10/23/25 11:39 AM, Meghana Malladi wrote:
>> @@ -1200,6 +1218,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
>> return 0;
>> }
>>
>> +static int prueth_xsk_pool_enable(struct prueth_emac *emac,
>> + struct xsk_buff_pool *pool, u16 queue_id)
>> +{
>> + struct prueth_rx_chn *rx_chn = &emac->rx_chns;
>> + u32 frame_size;
>> + int ret;
>> +
>> + if (queue_id >= PRUETH_MAX_RX_FLOWS ||
>> + queue_id >= emac->tx_ch_num) {
>> + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
>> + return -EINVAL;
>> + }
>> +
>> + frame_size = xsk_pool_get_rx_frame_size(pool);
>> + if (frame_size < PRUETH_MAX_PKT_SIZE)
>> + return -EOPNOTSUPP;
>> +
>> + ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
>> + if (ret) {
>> + netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
>> + return ret;
>> + }
>> +
>> + if (netif_running(emac->ndev)) {
>> + /* stop packets from wire for graceful teardown */
>> + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
>> + if (ret)
>> + return ret;
>> + prueth_destroy_rxq(emac);
>> + }
>> +
>> + emac->xsk_qid = queue_id;
>> + prueth_set_xsk_pool(emac, queue_id);
>> +
>> + if (netif_running(emac->ndev)) {
>> + ret = prueth_create_rxq(emac);
>
> It looks like this falls short of Jakub's request on v2:
>
> https://lore.kernel.org/netdev/20250903174847.5d8d1c9f@kernel.org/
>
> about not freeing the rx queue for reconfig.
>
I tried honoring Jakub's comment to avoid freeing the rx memory wherever
necessary.
"In case of icssg driver, freeing the rx memory is necessary as the
rx descriptor memory is owned by the cppi dma controller and can be
mapped to a single memory model (pages/xdp buffers) at a given time.
In order to remap it, the memory needs to be freed and reallocated."
> I think you should:
> - stop the H/W from processing incoming packets,
> - spool all the pending packets
> - attach/detach the xsk_pool
> - refill the ring
> - re-enable the H/W
>
Current implementation follows the same sequence:
1. Does a channel teardown -> stop incoming traffic
2. free the rx descriptors from free queue and completion queue -> spool
all pending packets/descriptors
3. attach/detach the xsk pool
4. allocate rx descriptors and fill the freeq after mapping them to the
correct memory buffers -> refill the ring
5. restart the NAPI - re-enable the H/W to recv the traffic
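
For reference, the common part of this reconfiguration boils down to
something like the sketch below. It only reuses helpers that already
exist in this series (icssg_set_port_state(), prueth_destroy_rxq(),
prueth_set_xsk_pool(), prueth_create_rxq()); the wrapper name is made
up for illustration, and the actual patch keeps the two halves inline
in prueth_xsk_pool_enable()/_disable():

static int prueth_xsk_reconfig_rxq(struct prueth_emac *emac, u16 queue_id)
{
        int ret = 0;

        if (netif_running(emac->ndev)) {
                /* 1-2: stop traffic from the wire, spool pending descriptors */
                ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
                if (ret)
                        return ret;
                prueth_destroy_rxq(emac);
        }

        /* 3: attach or detach the xsk pool; the caller has already
         * updated emac->xsk_qid, which this helper keys off of
         */
        prueth_set_xsk_pool(emac, queue_id);

        if (netif_running(emac->ndev)) {
                /* 4-5: refill the ring and re-enable the hardware */
                ret = prueth_create_rxq(emac);
                if (ret)
                        return ret;
                ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
        }

        return ret;
}

Only step 3 differs between the enable and disable paths; the teardown
and refill around it are identical.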
I am still working on skipping 2 and 4 steps but this will be a long
shot. Need to make sure all corner cases are getting covered. If this
approach looks doable without causing any regressions I might post it as
a followup patch later in the future.
> /P
>
Hi Paolo,
On 10/30/25 10:13, Meghana Malladi wrote:
> Hi Paolo,
>
> On 10/28/25 16:27, Paolo Abeni wrote:
>>
>> On 10/23/25 11:39 AM, Meghana Malladi wrote:
>>> @@ -1200,6 +1218,109 @@ static int emac_xdp_setup(struct prueth_emac
>>> *emac, struct netdev_bpf *bpf)
>>> return 0;
>>> }
>>>
>>> +static int prueth_xsk_pool_enable(struct prueth_emac *emac,
>>> + struct xsk_buff_pool *pool, u16 queue_id)
>>> +{
>>> + struct prueth_rx_chn *rx_chn = &emac->rx_chns;
>>> + u32 frame_size;
>>> + int ret;
>>> +
>>> + if (queue_id >= PRUETH_MAX_RX_FLOWS ||
>>> + queue_id >= emac->tx_ch_num) {
>>> + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
>>> + return -EINVAL;
>>> + }
>>> +
>>> + frame_size = xsk_pool_get_rx_frame_size(pool);
>>> + if (frame_size < PRUETH_MAX_PKT_SIZE)
>>> + return -EOPNOTSUPP;
>>> +
>>> + ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
>>> + if (ret) {
>>> + netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
>>> + return ret;
>>> + }
>>> +
>>> + if (netif_running(emac->ndev)) {
>>> + /* stop packets from wire for graceful teardown */
>>> + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
>>> + if (ret)
>>> + return ret;
>>> + prueth_destroy_rxq(emac);
>>> + }
>>> +
>>> + emac->xsk_qid = queue_id;
>>> + prueth_set_xsk_pool(emac, queue_id);
>>> +
>>> + if (netif_running(emac->ndev)) {
>>> + ret = prueth_create_rxq(emac);
>>
>> It looks like this falls short of Jakub's request on v2:
>>
>> https://lore.kernel.org/netdev/20250903174847.5d8d1c9f@kernel.org/
>>
>> about not freeing the rx queue for reconfig.
>>
>
> I tried honoring Jakub's comment to avoid freeing the rx memory wherever
> necessary.
>
> "In case of icssg driver, freeing the rx memory is necessary as the
> rx descriptor memory is owned by the cppi dma controller and can be
> mapped to a single memory model (pages/xdp buffers) at a given time.
> In order to remap it, the memory needs to be freed and reallocated."
>
Just to make sure we are on the same page, does the above explanation
make sense to you or do you want me to make any changes in this series
for v5 ?
>> I think you should:
>> - stop the H/W from processing incoming packets,
>> - spool all the pending packets
>> - attach/detach the xsk_pool
>> - refill the ring
>> - re-enable the H/W
>>
>
> Current implementation follows the same sequence:
> 1. Does a channel teardown -> stop incoming traffic
> 2. free the rx descriptors from free queue and completion queue -> spool
> all pending packets/descriptors
> 3. attach/detach the xsk pool
> 4. allocate rx descriptors and fill the freeq after mapping them to the
> correct memory buffers -> refill the ring
> 5. restart the NAPI - re-enable the H/W to recv the traffic
>
> I am still working on skipping 2 and 4 steps but this will be a long
> shot. Need to make sure all corner cases are getting covered. If this
> approach looks doable without causing any regressions I might post it as
> a followup patch later in the future.
>
>> /P
>>
>
thanks,
Meghana
On Tue, 4 Nov 2025 14:23:24 +0530 Meghana Malladi wrote:
> > I tried honoring Jakub's comment to avoid freeing the rx memory wherever
> > necessary.
> >
> > "In case of icssg driver, freeing the rx memory is necessary as the
> > rx descriptor memory is owned by the cppi dma controller and can be
> > mapped to a single memory model (pages/xdp buffers) at a given time.
> > In order to remap it, the memory needs to be freed and reallocated."
>
> Just to make sure we are on the same page, does the above explanation
> make sense to you or do you want me to make any changes in this series
> for v5 ?

No. Based on your reply below you seem to understand what is being
asked, so you're expected to do it.

> >> I think you should:
> >> - stop the H/W from processing incoming packets,
> >> - spool all the pending packets
> >> - attach/detach the xsk_pool
> >> - refill the ring
> >> - re-enable the H/W
> >
> > Current implementation follows the same sequence:
> > 1. Does a channel teardown -> stop incoming traffic
> > 2. free the rx descriptors from free queue and completion queue -> spool
> > all pending packets/descriptors
> > 3. attach/detach the xsk pool
> > 4. allocate rx descriptors and fill the freeq after mapping them to the
> > correct memory buffers -> refill the ring
> > 5. restart the NAPI - re-enable the H/W to recv the traffic
> >
> > I am still working on skipping 2 and 4 steps but this will be a long
> > shot. Need to make sure all corner cases are getting covered. If this
> > approach looks doable without causing any regressions I might post it as
> > a followup patch later in the future.
Hi Jakub,

On 11/5/25 05:18, Jakub Kicinski wrote:
> On Tue, 4 Nov 2025 14:23:24 +0530 Meghana Malladi wrote:
>>> I tried honoring Jakub's comment to avoid freeing the rx memory wherever
>>> necessary.
>>>
>>> "In case of icssg driver, freeing the rx memory is necessary as the
>>> rx descriptor memory is owned by the cppi dma controller and can be
>>> mapped to a single memory model (pages/xdp buffers) at a given time.
>>> In order to remap it, the memory needs to be freed and reallocated."
>>
>> Just to make sure we are on the same page, does the above explanation
>> make sense to you or do you want me to make any changes in this series
>> for v5 ?
>
> No. Based on your reply below you seem to understand what is being
> asked, so you're expected to do it.
>

Yes, this series currently implements whatever Paolo mentioned below.

>>>> I think you should:
>>>> - stop the H/W from processing incoming packets,
>>>> - spool all the pending packets
>>>> - attach/detach the xsk_pool
>>>> - refill the ring
>>>> - re-enable the H/W
>>>
>>> Current implementation follows the same sequence:
>>> 1. Does a channel teardown -> stop incoming traffic
>>> 2. free the rx descriptors from free queue and completion queue -> spool
>>> all pending packets/descriptors
>>> 3. attach/detach the xsk pool
>>> 4. allocate rx descriptors and fill the freeq after mapping them to the
>>> correct memory buffers -> refill the ring
>>> 5. restart the NAPI - re-enable the H/W to recv the traffic
>>>

Sorry for the confusion. Whatever I mentioned below might have given an
impression that there was additional required work; that wasn’t my
intention. What I described is only a possible design enhancement and
not mandatory. The current patch series is complete and does not have
gaps in its design.

>>> I am still working on skipping 2 and 4 steps but this will be a long
>>> shot. Need to make sure all corner cases are getting covered. If this
>>> approach looks doable without causing any regressions I might post it as
>>> a followup patch later in the future.