XSk infra has been using its own DMA sync shortcut to try to avoid
redundant function calls. Now that there is a generic one, remove
the custom implementation and rely on the generic helpers.

xsk_buff_dma_sync_for_cpu() no longer needs the second argument, so
remove it.
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
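For context, the generic shortcut this series switches to lives in the DMA
mapping core. In rough sketch form it looks like the below (simplified; the
real helpers are in include/linux/dma-mapping.h from the dma_skip_sync
dependency series, so treat the field and internal function names here as
illustrative):

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Debug builds always go through the real sync ops */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

Since dma_sync_single_for_cpu()/_for_device() already bail out early on
devices that don't need syncing, the per-pool dma_need_sync bookkeeping and
the *_slow() out-of-line helpers removed below are redundant.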
include/net/xdp_sock_drv.h | 7 ++---
include/net/xsk_buff_pool.h | 14 +++-------
drivers/net/ethernet/engleder/tsnep_main.c | 2 +-
.../net/ethernet/freescale/dpaa2/dpaa2-xsk.c | 2 +-
drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2 +-
drivers/net/ethernet/intel/ice/ice_xsk.c | 2 +-
drivers/net/ethernet/intel/igc/igc_main.c | 2 +-
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 2 +-
.../ethernet/mellanox/mlx5/core/en/xsk/rx.c | 4 +--
.../net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +-
drivers/net/ethernet/netronome/nfp/nfd3/xsk.c | 2 +-
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +-
net/xdp/xsk_buff_pool.c | 28 ++-----------------
13 files changed, 20 insertions(+), 51 deletions(-)
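The driver-facing change is purely mechanical. A representative Rx-path
call site (sketched from the hunks below; the ring and pool names are
illustrative) goes from:

	xsk_buff_set_size(xdp, size);
	/* helper checked pool->dma_need_sync before syncing */
	xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);

to:

	xsk_buff_set_size(xdp, size);
	/* the xskb already knows its pool; the generic helper skips
	 * the sync internally when the device doesn't need it
	 */
	xsk_buff_dma_sync_for_cpu(xdp);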
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index c9aec9ab6191..0a5dca2b2b3f 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -219,13 +219,10 @@ static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool
return meta;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
- if (!pool->dma_need_sync)
- return;
-
xp_dma_sync_for_cpu(xskb);
}
@@ -402,7 +399,7 @@ static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool
return NULL;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 99dd7376df6a..bacb33f1e3e5 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -43,7 +43,6 @@ struct xsk_dma_map {
refcount_t users;
struct list_head list; /* Protected by the RTNL_LOCK */
u32 dma_pages_cnt;
- bool dma_need_sync;
};
struct xsk_buff_pool {
@@ -82,7 +81,6 @@ struct xsk_buff_pool {
u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
- bool dma_need_sync;
bool unaligned;
bool tx_sw_csum;
void *addrs;
@@ -155,21 +153,17 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
return xskb->frame_dma;
}
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- xp_dma_sync_for_cpu_slow(xskb);
+ dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
+ xskb->pool->frame_len,
+ DMA_BIDIRECTIONAL);
}
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
- size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma, size_t size)
{
- if (!pool->dma_need_sync)
- return;
-
- xp_dma_sync_for_device_slow(pool, dma, size);
+ dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}
/* Masks for xdp_umem_page flags.
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 4b15af6b7122..44da335d66bd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1587,7 +1587,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
- xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(entry->xdp);
/* RX metadata with timestamps is in front of actual data,
* subtract metadata size to get length of actual data and
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index 051748b997f3..a466c2379146 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -55,7 +55,7 @@ static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(xdp_buff);
xdp_buff->rxq = &ch->xdp_rxq;
- xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp_buff);
xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
/* xdp.data pointer may have changed */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index a85b425794df..4e885df789ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -482,7 +482,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
- xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi);
if (!first)
first = bi;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index aa81d1162b81..7541f223bf4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -878,7 +878,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp);
if (!first) {
first = xdp;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34c257a51ed1..8b2e5b623314 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2812,7 +2812,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 397cb773fabb..3e3b471e53f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -303,7 +303,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index b8dd74453655..1b7132fa70de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -270,7 +270,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
/* Possible flows:
@@ -319,7 +319,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d601b5faaed5..b5333da20e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -917,7 +917,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (!rq->xsk_pool) {
count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
- } else if (likely(!rq->xsk_pool->dma_need_sync)) {
+ } else if (likely(!dma_dev_need_sync(rq->pdev))) {
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
} else {
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 45be6954d5aa..01cfa9cc1b5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -184,7 +184,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
xrxbuf->xdp->data += meta_len;
xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
xdp_set_data_meta_invalid(xrxbuf->xdp);
- xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);
net_prefetch(xrxbuf->xdp->data);
if (meta_len) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 59bf83904b62..80b7a6451d15 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5361,7 +5361,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
/* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len;
- xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(buf->xdp);
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ce60ecd48a4d..b2cce6dbe6d8 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -338,7 +338,6 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
dma_map->netdev = netdev;
dma_map->dev = dev;
- dma_map->dma_need_sync = false;
dma_map->dma_pages_cnt = nr_pages;
refcount_set(&dma_map->users, 1);
list_add(&dma_map->list, &umem->xsk_dma_list);
@@ -424,7 +423,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
pool->dev = dma_map->dev;
pool->dma_pages_cnt = dma_map->dma_pages_cnt;
- pool->dma_need_sync = dma_map->dma_need_sync;
memcpy(pool->dma_pages, dma_map->dma_pages,
pool->dma_pages_cnt * sizeof(*pool->dma_pages));
@@ -460,8 +458,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
__xp_dma_unmap(dma_map, attrs);
return -ENOMEM;
}
- if (dma_need_sync(dev, dma))
- dma_map->dma_need_sync = true;
dma_map->dma_pages[i] = dma;
}
@@ -557,11 +553,8 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
xskb->xdp.data_meta = xskb->xdp.data;
xskb->xdp.flags = 0;
- if (pool->dma_need_sync) {
- dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
- pool->frame_len,
- DMA_BIDIRECTIONAL);
- }
+ xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);
+
return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);
@@ -633,7 +626,7 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
u32 nb_entries1 = 0, nb_entries2;
- if (unlikely(pool->dma_need_sync)) {
+ if (unlikely(dma_dev_need_sync(pool->dev))) {
struct xdp_buff *buff;
/* Slow path */
@@ -693,18 +686,3 @@ dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);
-
-void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
-{
- dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
- xskb->pool->frame_len, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);
-
-void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
- size_t size)
-{
- dma_sync_single_range_for_device(pool->dev, dma, 0,
- size, DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(xp_dma_sync_for_device_slow);
--
2.45.0
On Mon, 6 May 2024 11:48:55 +0200 Alexander Lobakin wrote:
> XSk infra has been using its own DMA sync shortcut to try to avoid
> redundant function calls. Now that there is a generic one, remove
> the custom implementation and rely on the generic helpers.
>
> xsk_buff_dma_sync_for_cpu() no longer needs the second argument, so
> remove it.

I think this is crashing xsk tests:

[ 91.048963] BUG: kernel NULL pointer dereference, address: 0000000000000464
[ 91.049412] #PF: supervisor read access in kernel mode
[ 91.049739] #PF: error_code(0x0000) - not-present page
[ 91.050057] PGD 0 P4D 0
[ 91.050221] Oops: 0000 [#1] PREEMPT SMP NOPTI
[ 91.050500] CPU: 1 PID: 114 Comm: new_name Tainted: G OE 6.9.0-rc6-gad3c108348fd-dirty #372
[ 91.051088] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
[ 91.051649] RIP: 0010:xp_alloc+0x76/0x240
[ 91.051903] Code: 48 89 0a 48 89 00 48 89 40 08 41 c7 44 24 34 00 00 00 00 49 8b 44 24 18 48 05 00 01 00 00 49 89 04 24 49 89 44 24 10 48 8b 3b <f6> 87 64 04 00 00 20 0f 85 16 01 00 00 48 8b 44 24 08 65 48 33 04
[ 91.053055] RSP: 0018:ffff99e7c00f0b00 EFLAGS: 00010286
[ 91.053400] RAX: ffff99e7c0c9d100 RBX: ffff89a400901c00 RCX: 0000000000010000
[ 91.053838] RDX: 0000000000000000 RSI: 0000000000000010 RDI: 0000000000000000
[ 91.054277] RBP: ffff89a4026e30e0 R08: 0000000000000001 R09: 0000000000009000
[ 91.054716] R10: 779660ad50f0d4e6 R11: 79b5ce88640fb4f7 R12: ffff89a40c31d870
[ 91.055156] R13: 0000000000000020 R14: 0000000000000000 R15: ffff89a4068c6000
[ 91.055596] FS: 00007f87685bef80(0000) GS:ffff89a43bd00000(0000) knlGS:0000000000000000
[ 91.056090] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 91.056458] CR2: 0000000000000464 CR3: 000000010229c001 CR4: 0000000000770ef0
[ 91.056904] PKRU: 55555554
[ 91.057079] Call Trace:
[ 91.057237]  <IRQ>
[ 91.057371]  ? __die_body+0x1f/0x70
[ 91.057595]  ? page_fault_oops+0x15a/0x460
[ 91.057852]  ? find_held_lock+0x2b/0x80
[ 91.058093]  ? __skb_flow_dissect+0x30f/0x1f10
[ 91.058374]  ? lock_release+0xbd/0x280
[ 91.058610]  ? exc_page_fault+0x67/0x1e0
[ 91.058859]  ? asm_exc_page_fault+0x26/0x30
[ 91.059126]  ? xp_alloc+0x76/0x240
[ 91.059341]  __xsk_rcv+0x1f0/0x360
[ 91.059558]  ? __skb_get_hash+0x5b/0x1f0
[ 91.059804]  ? __skb_get_hash+0x5b/0x1f0
[ 91.060050]  __xsk_map_redirect+0x7c/0x2c0
[ 91.060315]  ? rcu_read_lock_held_common+0x2e/0x50
[ 91.060622]  xdp_do_redirect+0x28f/0x4b0
[ 91.060871]  veth_xdp_rcv_skb+0x29e/0x930
[ 91.061126]  veth_xdp_rcv+0x184/0x290
[ 91.061358]  ? update_load_avg+0x8c/0x8c0
[ 91.061609]  ? select_task_rq_fair+0x1ff/0x15a0
[ 91.061894]  ? place_entity+0x19/0x100
[ 91.062131]  veth_poll+0x6c/0x2f0
[ 91.062343]  ? _raw_spin_unlock_irqrestore+0x27/0x50
[ 91.062653]  ? try_to_wake_up+0x261/0x8d0
[ 91.062905]  ? find_held_lock+0x2b/0x80
[ 91.063147]  __napi_poll+0x27/0x200
[ 91.063376]  net_rx_action+0x172/0x320
[ 91.063617]  __do_softirq+0xb6/0x3a3
[ 91.063843]  ? __dev_direct_xmit+0x167/0x1b0
[ 91.064114]  do_softirq.part.0+0x3b/0x70
[ 91.064373]  </IRQ>
[ 91.064511]  <TASK>
[ 91.064650]  __local_bh_enable_ip+0xbd/0xe0
[ 91.064913]  __dev_direct_xmit+0x16c/0x1b0
[ 91.065171]  xsk_generic_xmit+0x703/0xb10
[ 91.065425]  xsk_sendmsg+0x21f/0x2f0
From: Jakub Kicinski <kuba@kernel.org>
Date: Mon, 6 May 2024 11:29:31 -0700

> On Mon, 6 May 2024 11:48:55 +0200 Alexander Lobakin wrote:
>> XSk infra has been using its own DMA sync shortcut to try to avoid
>> redundant function calls. Now that there is a generic one, remove
>> the custom implementation and rely on the generic helpers.
>>
>> xsk_buff_dma_sync_for_cpu() no longer needs the second argument, so
>> remove it.
>
> I think this is crashing xsk tests:
>
> [ 91.048963] BUG: kernel NULL pointer dereference, address: 0000000000000464
> [ 91.049412] #PF: supervisor read access in kernel mode
> [ 91.049739] #PF: error_code(0x0000) - not-present page
> [ 91.050057] PGD 0 P4D 0
> [ 91.050221] Oops: 0000 [#1] PREEMPT SMP NOPTI
> [ 91.050500] CPU: 1 PID: 114 Comm: new_name Tainted: G OE 6.9.0-rc6-gad3c108348fd-dirty #372
> [ 91.051088] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
> [ 91.051649] RIP: 0010:xp_alloc+0x76/0x240

Ah okay. Didn't account for generic (non-ZC) XSk. Will fix in v6.
Thanks for the report!

> [ 91.051903] Code: 48 89 0a 48 89 00 48 89 40 08 41 c7 44 24 34 00 00 00 00 49 8b 44 24 18 48 05 00 01 00 00 49 89 04 24 49 89 44 24 10 48 8b 3b <f6> 87 64 04 00 00 20 0f 85 16 01 00 00 48 8b 44 24 08 65 48 33 04
> [ 91.053055] RSP: 0018:ffff99e7c00f0b00 EFLAGS: 00010286
> [ 91.053400] RAX: ffff99e7c0c9d100 RBX: ffff89a400901c00 RCX: 0000000000010000
> [ 91.053838] RDX: 0000000000000000 RSI: 0000000000000010 RDI: 0000000000000000
> [ 91.054277] RBP: ffff89a4026e30e0 R08: 0000000000000001 R09: 0000000000009000
> [ 91.054716] R10: 779660ad50f0d4e6 R11: 79b5ce88640fb4f7 R12: ffff89a40c31d870
> [ 91.055156] R13: 0000000000000020 R14: 0000000000000000 R15: ffff89a4068c6000
> [ 91.055596] FS: 00007f87685bef80(0000) GS:ffff89a43bd00000(0000) knlGS:0000000000000000
> [ 91.056090] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [ 91.056458] CR2: 0000000000000464 CR3: 000000010229c001 CR4: 0000000000770ef0
> [ 91.056904] PKRU: 55555554
> [ 91.057079] Call Trace:
> [ 91.057237]  <IRQ>
> [ 91.057371]  ? __die_body+0x1f/0x70
> [ 91.057595]  ? page_fault_oops+0x15a/0x460
> [ 91.057852]  ? find_held_lock+0x2b/0x80
> [ 91.058093]  ? __skb_flow_dissect+0x30f/0x1f10
> [ 91.058374]  ? lock_release+0xbd/0x280
> [ 91.058610]  ? exc_page_fault+0x67/0x1e0
> [ 91.058859]  ? asm_exc_page_fault+0x26/0x30
> [ 91.059126]  ? xp_alloc+0x76/0x240
> [ 91.059341]  __xsk_rcv+0x1f0/0x360
> [ 91.059558]  ? __skb_get_hash+0x5b/0x1f0
> [ 91.059804]  ? __skb_get_hash+0x5b/0x1f0
> [ 91.060050]  __xsk_map_redirect+0x7c/0x2c0
> [ 91.060315]  ? rcu_read_lock_held_common+0x2e/0x50
> [ 91.060622]  xdp_do_redirect+0x28f/0x4b0
> [ 91.060871]  veth_xdp_rcv_skb+0x29e/0x930
> [ 91.061126]  veth_xdp_rcv+0x184/0x290
> [ 91.061358]  ? update_load_avg+0x8c/0x8c0
> [ 91.061609]  ? select_task_rq_fair+0x1ff/0x15a0
> [ 91.061894]  ? place_entity+0x19/0x100
> [ 91.062131]  veth_poll+0x6c/0x2f0
> [ 91.062343]  ? _raw_spin_unlock_irqrestore+0x27/0x50
> [ 91.062653]  ? try_to_wake_up+0x261/0x8d0
> [ 91.062905]  ? find_held_lock+0x2b/0x80
> [ 91.063147]  __napi_poll+0x27/0x200
> [ 91.063376]  net_rx_action+0x172/0x320
> [ 91.063617]  __do_softirq+0xb6/0x3a3
> [ 91.063843]  ? __dev_direct_xmit+0x167/0x1b0
> [ 91.064114]  do_softirq.part.0+0x3b/0x70
> [ 91.064373]  </IRQ>
> [ 91.064511]  <TASK>
> [ 91.064650]  __local_bh_enable_ip+0xbd/0xe0
> [ 91.064913]  __dev_direct_xmit+0x16c/0x1b0
> [ 91.065171]  xsk_generic_xmit+0x703/0xb10
> [ 91.065425]  xsk_sendmsg+0x21f/0x2f0

Olek
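A note on the oops above: the generic (copy-mode, non-ZC) XSk path never
calls xp_dma_map(), so pool->dev stays NULL, and the now-unconditional sync
in xp_alloc() reads a flag through that NULL struct device pointer (RDI is
0 at the faulting test, CR2 = 0x464). A minimal sketch of the kind of guard
v6 needs in net/xdp/xsk_buff_pool.c, illustrative only and not necessarily
the exact fix that landed:

	/* in xp_alloc(), before returning &xskb->xdp: */

	/* Copy-mode pools never go through xp_dma_map(), so pool->dev
	 * is NULL there and there is nothing to sync.
	 */
	if (pool->dev)
		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);

The batched path would need the same treatment, e.g. making the
xp_alloc_batch() check read
"if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))".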