From: Mina Almasry <almasrymina@google.com>
Declare PP_FLAG_ALLOW_UNREADABLE_NETMEM to turn on unreadable netmem
support in GVE.
We also drop any net_iov packets when header split is not enabled, since
we're unable to process packets whose header landed in unreadable
netmem.
Use page_pool_dma_sync_netmem_for_cpu in lieu of
dma_sync_single_range_for_cpu to correctly handle unreadable netmem
that should not be dma-sync'd.
Disable the rx_copybreak optimization if the payload is unreadable
netmem, as that optimization needs CPU access to the payload.
Signed-off-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
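(Note for reviewers new to the netmem API: below is a minimal sketch of the
two helpers this patch leans on. The demo_* names are illustrative only and
are not part of the patch.)

	#include <net/netmem.h>
	#include <net/page_pool/helpers.h>

	/* net_iov-backed (unreadable) netmem has no kernel mapping, so the
	 * CPU must never dereference it; page-backed netmem stays readable.
	 */
	static bool demo_cpu_can_read(netmem_ref netmem)
	{
		return !netmem_is_net_iov(netmem);
	}

	/* Unlike dma_sync_single_range_for_cpu(), this page_pool helper is
	 * aware of the pool's memory provider and skips the sync for netmem
	 * that must not be dma-sync'd (e.g. device memory).
	 */
	static void demo_sync_for_cpu(struct page_pool *pool,
				      netmem_ref netmem, u32 offset, u32 len)
	{
		page_pool_dma_sync_netmem_for_cpu(pool, netmem, offset, len);
	}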
.../ethernet/google/gve/gve_buffer_mgmt_dqo.c | 5 +++
drivers/net/ethernet/google/gve/gve_rx_dqo.c | 36 ++++++++++++++++---
2 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 8f5021e59e0a..0e2b703c673a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
 		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
+	if (priv->header_split_enabled) {
+		pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+		pp.queue_idx = rx->q_num;
+	}
+
 	return page_pool_create(&pp);
 }
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 7380c2b7a2d8..8c75a4d1e3e7 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	return 0;
 }
 
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+			 struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+	struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+	if (rx->dqo.page_pool) {
+		page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+						  page_info->netmem,
+						  page_info->page_offset,
+						  buf_len);
+	} else {
+		dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+					      page_info->page_offset +
+					      page_info->pad,
+					      buf_len, DMA_FROM_DEVICE);
+	}
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.
@@ -793,13 +811,19 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		rx->rx_hsplit_unsplit_pkt += unsplit;
 		rx->rx_hsplit_bytes += hdr_len;
 		u64_stats_update_end(&rx->statss);
+	} else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+		   netmem_is_net_iov(buf_state->page_info.netmem)) {
+		/* when header split is disabled, the header went to the packet
+		 * buffer. If the packet buffer is a net_iov, those can't be
+		 * easily mapped into the kernel space to access the header
+		 * required to process the packet.
+		 */
+		gve_free_buffer(rx, buf_state);
+		return -EFAULT;
 	}
 
 	/* Sync the portion of dma buffer for CPU to read. */
-	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
-				      buf_state->page_info.page_offset +
-				      buf_state->page_info.pad,
-				      buf_len, DMA_FROM_DEVICE);
+	gve_dma_sync(priv, rx, buf_state, buf_len);
 
 	/* Append to current skb if one exists. */
 	if (rx->ctx.skb_head) {
@@ -837,7 +861,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		u64_stats_update_end(&rx->statss);
 	}
 
-	if (eop && buf_len <= priv->rx_copybreak) {
+	if (eop && buf_len <= priv->rx_copybreak &&
+	    !(rx->dqo.page_pool &&
+	      netmem_is_net_iov(buf_state->page_info.netmem))) {
 		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
 					       &buf_state->page_info, buf_len);
 		if (unlikely(!rx->ctx.skb_head))
--
2.50.0.727.gbf7dc18ff4-goog
On Wed, Jul 23, 2025 at 10:28:29PM +0000, Harshitha Ramamurthy wrote:
...
> diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
...
> @@ -793,13 +811,19 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
> 		rx->rx_hsplit_unsplit_pkt += unsplit;
> 		rx->rx_hsplit_bytes += hdr_len;
> 		u64_stats_update_end(&rx->statss);
> +	} else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
> +		   netmem_is_net_iov(buf_state->page_info.netmem)) {
> +		/* when header split is disabled, the header went to the packet
> +		 * buffer. If the packet buffer is a net_iov, those can't be
> +		 * easily mapped into the kernel space to access the header
> +		 * required to process the packet.
> +		 */
> +		gve_free_buffer(rx, buf_state);
> +		return -EFAULT;

nit: I think it would be nice to consistently handle error paths in this
function using goto error.

> 	}
>
> 	/* Sync the portion of dma buffer for CPU to read. */
> -	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
> -				      buf_state->page_info.page_offset +
> -				      buf_state->page_info.pad,
> -				      buf_len, DMA_FROM_DEVICE);
> +	gve_dma_sync(priv, rx, buf_state, buf_len);
>
> 	/* Append to current skb if one exists. */
> 	if (rx->ctx.skb_head) {
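For context, a minimal sketch of the goto-based error handling the nit asks
for; the demo_ function name and the error label are illustrative, not code
from the patch:

	static int demo_rx_dqo(struct gve_rx_ring *rx,
			       struct gve_rx_buf_state_dqo *buf_state)
	{
		int err;

		/* Header split disabled and the header landed in unreadable
		 * netmem: the CPU cannot parse it, so fail the descriptor.
		 */
		if (!rx->ctx.skb_head && rx->dqo.page_pool &&
		    netmem_is_net_iov(buf_state->page_info.netmem)) {
			err = -EFAULT;
			goto error;
		}

		/* ... rest of the receive path ... */

		return 0;

	error:
		/* Single exit point: every failure path frees the buffer. */
		gve_free_buffer(rx, buf_state);
		return err;
	}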