To simplify struct page, the page pool members of struct page should be
moved elsewhere, allowing them to be removed from struct page.

Reuse struct net_iov, which already mirrors the page pool members, for
system memory as well.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 include/linux/skbuff.h                  |  4 +--
 include/net/netmem.h                    | 20 ++++++------
 include/net/page_pool/memory_provider.h |  6 ++--
 io_uring/zcrx.c                         | 42 ++++++++++++-------------
 net/core/devmem.c                       | 14 ++++-----
 net/core/devmem.h                       | 24 +++++++-------
 net/core/page_pool.c                    |  6 ++--
 net/ipv4/tcp.c                          |  2 +-
 8 files changed, 59 insertions(+), 59 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b974a277975a8..bf67c47319a56 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3598,10 +3598,10 @@ static inline bool skb_frag_is_net_iov(const skb_frag_t *frag)
* skb_frag_net_iov - retrieve the net_iov referred to by fragment
* @frag: the fragment
*
- * Return: the &struct net_iov associated with @frag. Returns NULL if this
+ * Return: the &struct netmem_desc associated with @frag. Returns NULL if this
* frag has no associated net_iov.
*/
-static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag)
+static inline struct netmem_desc *skb_frag_net_iov(const skb_frag_t *frag)
{
if (!skb_frag_is_net_iov(frag))
return NULL;
diff --git a/include/net/netmem.h b/include/net/netmem.h
index c61d5b21e7b42..45c209d6cc689 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -20,7 +20,7 @@ DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
*/
#define NET_IOV 0x01UL
-struct net_iov {
+struct netmem_desc {
unsigned long __unused_padding;
unsigned long pp_magic;
struct page_pool *pp;
@@ -31,7 +31,7 @@ struct net_iov {
struct net_iov_area {
/* Array of net_iovs for this area. */
- struct net_iov *niovs;
+ struct netmem_desc *niovs;
size_t num_niovs;
/* Offset into the dma-buf where this chunk starts. */
@@ -56,19 +56,19 @@ struct net_iov_area {
*/
#define NET_IOV_ASSERT_OFFSET(pg, iov) \
static_assert(offsetof(struct page, pg) == \
- offsetof(struct net_iov, iov))
+ offsetof(struct netmem_desc, iov))
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
-static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
+static inline struct net_iov_area *net_iov_owner(const struct netmem_desc *niov)
{
return niov->owner;
}
-static inline unsigned int net_iov_idx(const struct net_iov *niov)
+static inline unsigned int net_iov_idx(const struct netmem_desc *niov)
{
return niov - net_iov_owner(niov)->niovs;
}
@@ -118,17 +118,17 @@ static inline struct page *netmem_to_page(netmem_ref netmem)
return __netmem_to_page(netmem);
}
-static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
+static inline struct netmem_desc *netmem_to_net_iov(netmem_ref netmem)
{
if (netmem_is_net_iov(netmem))
- return (struct net_iov *)((__force unsigned long)netmem &
+ return (struct netmem_desc *)((__force unsigned long)netmem &
~NET_IOV);
DEBUG_NET_WARN_ON_ONCE(true);
return NULL;
}
-static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
+static inline netmem_ref net_iov_to_netmem(struct netmem_desc *niov)
{
return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}
@@ -168,9 +168,9 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
return page_to_pfn(netmem_to_page(netmem));
}
-static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
+static inline struct netmem_desc *__netmem_clear_lsb(netmem_ref netmem)
{
- return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
+ return (struct netmem_desc *)((__force unsigned long)netmem & ~NET_IOV);
}
/**
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
index ada4f968960ae..83aac2e9692c0 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -19,9 +19,9 @@ struct memory_provider_ops {
void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
};
-bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
-void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
-void net_mp_niov_clear_page_pool(struct net_iov *niov);
+bool net_mp_niov_set_dma_addr(struct netmem_desc *niov, dma_addr_t addr);
+void net_mp_niov_set_page_pool(struct page_pool *pool, struct netmem_desc *niov);
+void net_mp_niov_clear_page_pool(struct netmem_desc *niov);
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
struct pp_memory_provider_params *p);
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 0f46e0404c045..c0b66039d43df 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -34,7 +34,7 @@ static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
int i;
for (i = 0; i < nr_mapped; i++) {
- struct net_iov *niov = &area->nia.niovs[i];
+ struct netmem_desc *niov = &area->nia.niovs[i];
dma_addr_t dma;
dma = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
@@ -55,7 +55,7 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
int i;
for (i = 0; i < area->nia.num_niovs; i++) {
- struct net_iov *niov = &area->nia.niovs[i];
+ struct netmem_desc *niov = &area->nia.niovs[i];
dma_addr_t dma;
dma = dma_map_page_attrs(ifq->dev, area->pages[i], 0, PAGE_SIZE,
@@ -79,7 +79,7 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
}
static void io_zcrx_sync_for_device(const struct page_pool *pool,
- struct net_iov *niov)
+ struct netmem_desc *niov)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
dma_addr_t dma_addr;
@@ -106,21 +106,21 @@ struct io_zcrx_args {
static const struct memory_provider_ops io_uring_pp_zc_ops;
-static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
+static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct netmem_desc *niov)
{
struct net_iov_area *owner = net_iov_owner(niov);
return container_of(owner, struct io_zcrx_area, nia);
}
-static inline atomic_t *io_get_user_counter(struct net_iov *niov)
+static inline atomic_t *io_get_user_counter(struct netmem_desc *niov)
{
struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
return &area->user_refs[net_iov_idx(niov)];
}
-static bool io_zcrx_put_niov_uref(struct net_iov *niov)
+static bool io_zcrx_put_niov_uref(struct netmem_desc *niov)
{
atomic_t *uref = io_get_user_counter(niov);
@@ -130,12 +130,12 @@ static bool io_zcrx_put_niov_uref(struct net_iov *niov)
return true;
}
-static void io_zcrx_get_niov_uref(struct net_iov *niov)
+static void io_zcrx_get_niov_uref(struct netmem_desc *niov)
{
atomic_inc(io_get_user_counter(niov));
}
-static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
+static inline struct page *io_zcrx_iov_page(const struct netmem_desc *niov)
{
struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
@@ -242,7 +242,7 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
goto err;
for (i = 0; i < nr_iovs; i++) {
- struct net_iov *niov = &area->nia.niovs[i];
+ struct netmem_desc *niov = &area->nia.niovs[i];
niov->owner = &area->nia;
area->freelist[i] = i;
@@ -435,7 +435,7 @@ void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
io_zcrx_ifq_free(ifq);
}
-static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
+static struct netmem_desc *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
{
unsigned niov_idx;
@@ -445,7 +445,7 @@ static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
return &area->nia.niovs[niov_idx];
}
-static void io_zcrx_return_niov_freelist(struct net_iov *niov)
+static void io_zcrx_return_niov_freelist(struct netmem_desc *niov)
{
struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
@@ -454,7 +454,7 @@ static void io_zcrx_return_niov_freelist(struct net_iov *niov)
spin_unlock_bh(&area->freelist_lock);
}
-static void io_zcrx_return_niov(struct net_iov *niov)
+static void io_zcrx_return_niov(struct netmem_desc *niov)
{
netmem_ref netmem = net_iov_to_netmem(niov);
@@ -476,7 +476,7 @@ static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
/* Reclaim back all buffers given to the user space. */
for (i = 0; i < area->nia.num_niovs; i++) {
- struct net_iov *niov = &area->nia.niovs[i];
+ struct netmem_desc *niov = &area->nia.niovs[i];
int nr;
if (!atomic_read(io_get_user_counter(niov)))
@@ -532,7 +532,7 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
do {
struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
struct io_zcrx_area *area;
- struct net_iov *niov;
+ struct netmem_desc *niov;
unsigned niov_idx, area_idx;
area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
@@ -573,7 +573,7 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
spin_lock_bh(&area->freelist_lock);
while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
- struct net_iov *niov = __io_zcrx_get_free_niov(area);
+ struct netmem_desc *niov = __io_zcrx_get_free_niov(area);
netmem_ref netmem = net_iov_to_netmem(niov);
net_mp_niov_set_page_pool(pp, niov);
@@ -604,7 +604,7 @@ static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
{
- struct net_iov *niov;
+ struct netmem_desc *niov;
if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
return false;
@@ -678,7 +678,7 @@ static const struct memory_provider_ops io_uring_pp_zc_ops = {
.uninstall = io_pp_uninstall,
};
-static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
+static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct netmem_desc *niov,
struct io_zcrx_ifq *ifq, int off, int len)
{
struct io_uring_zcrx_cqe *rcqe;
@@ -701,9 +701,9 @@ static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
return true;
}
-static struct net_iov *io_zcrx_alloc_fallback(struct io_zcrx_area *area)
+static struct netmem_desc *io_zcrx_alloc_fallback(struct io_zcrx_area *area)
{
- struct net_iov *niov = NULL;
+ struct netmem_desc *niov = NULL;
spin_lock_bh(&area->freelist_lock);
if (area->free_count)
@@ -726,7 +726,7 @@ static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
while (len) {
size_t copy_size = min_t(size_t, PAGE_SIZE, len);
const int dst_off = 0;
- struct net_iov *niov;
+ struct netmem_desc *niov;
struct page *dst_page;
void *dst_addr;
@@ -784,7 +784,7 @@ static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
const skb_frag_t *frag, int off, int len)
{
- struct net_iov *niov;
+ struct netmem_desc *niov;
if (unlikely(!skb_frag_is_net_iov(frag)))
return io_zcrx_copy_frag(req, ifq, frag, off, len);
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 6e27a47d04935..5568478dd2ff6 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -28,7 +28,7 @@ static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
static const struct memory_provider_ops dmabuf_devmem_ops;
-bool net_is_devmem_iov(struct net_iov *niov)
+bool net_is_devmem_iov(struct netmem_desc *niov)
{
return niov->pp->mp_ops == &dmabuf_devmem_ops;
}
@@ -43,7 +43,7 @@ static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
kfree(owner);
}
-static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
+static dma_addr_t net_devmem_get_dma_addr(const struct netmem_desc *niov)
{
struct dmabuf_genpool_chunk_owner *owner;
@@ -74,12 +74,12 @@ void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
kfree(binding);
}
-struct net_iov *
+struct netmem_desc *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
struct dmabuf_genpool_chunk_owner *owner;
unsigned long dma_addr;
- struct net_iov *niov;
+ struct netmem_desc *niov;
ssize_t offset;
ssize_t index;
@@ -99,7 +99,7 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
return niov;
}
-void net_devmem_free_dmabuf(struct net_iov *niov)
+void net_devmem_free_dmabuf(struct netmem_desc *niov)
{
struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
unsigned long dma_addr = net_devmem_get_dma_addr(niov);
@@ -233,7 +233,7 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
dma_addr_t dma_addr = sg_dma_address(sg);
struct dmabuf_genpool_chunk_owner *owner;
size_t len = sg_dma_len(sg);
- struct net_iov *niov;
+ struct netmem_desc *niov;
owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
dev_to_node(&dev->dev));
@@ -319,7 +319,7 @@ int mp_dmabuf_devmem_init(struct page_pool *pool)
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
- struct net_iov *niov;
+ struct netmem_desc *niov;
netmem_ref netmem;
niov = net_devmem_alloc_dmabuf(binding);
diff --git a/net/core/devmem.h b/net/core/devmem.h
index 7fc158d527293..314ab0acdf716 100644
--- a/net/core/devmem.h
+++ b/net/core/devmem.h
@@ -70,7 +70,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
struct netlink_ext_ack *extack);
static inline struct dmabuf_genpool_chunk_owner *
-net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
+net_devmem_iov_to_chunk_owner(const struct netmem_desc *niov)
{
struct net_iov_area *owner = net_iov_owner(niov);
@@ -78,17 +78,17 @@ net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
}
static inline struct net_devmem_dmabuf_binding *
-net_devmem_iov_binding(const struct net_iov *niov)
+net_devmem_iov_binding(const struct netmem_desc *niov)
{
return net_devmem_iov_to_chunk_owner(niov)->binding;
}
-static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
+static inline u32 net_devmem_iov_binding_id(const struct netmem_desc *niov)
{
return net_devmem_iov_binding(niov)->id;
}
-static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+static inline unsigned long net_iov_virtual_addr(const struct netmem_desc *niov)
{
struct net_iov_area *owner = net_iov_owner(niov);
@@ -111,11 +111,11 @@ net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
__net_devmem_dmabuf_binding_free(binding);
}
-struct net_iov *
+struct netmem_desc *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
-void net_devmem_free_dmabuf(struct net_iov *ppiov);
+void net_devmem_free_dmabuf(struct netmem_desc *ppiov);
-bool net_is_devmem_iov(struct net_iov *niov);
+bool net_is_devmem_iov(struct netmem_desc *niov);
#else
struct net_devmem_dmabuf_binding;
@@ -146,27 +146,27 @@ net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
return -EOPNOTSUPP;
}
-static inline struct net_iov *
+static inline struct netmem_desc *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
return NULL;
}
-static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
+static inline void net_devmem_free_dmabuf(struct netmem_desc *ppiov)
{
}
-static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+static inline unsigned long net_iov_virtual_addr(const struct netmem_desc *niov)
{
return 0;
}
-static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
+static inline u32 net_devmem_iov_binding_id(const struct netmem_desc *niov)
{
return 0;
}
-static inline bool net_is_devmem_iov(struct net_iov *niov)
+static inline bool net_is_devmem_iov(struct netmem_desc *niov)
{
return false;
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 7745ad924ae2d..575fdab337414 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -1198,7 +1198,7 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
}
EXPORT_SYMBOL(page_pool_update_nid);
-bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
+bool net_mp_niov_set_dma_addr(struct netmem_desc *niov, dma_addr_t addr)
{
return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr);
}
@@ -1206,7 +1206,7 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
/* Associate a niov with a page pool. Should follow with a matching
* net_mp_niov_clear_page_pool()
*/
-void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
+void net_mp_niov_set_page_pool(struct page_pool *pool, struct netmem_desc *niov)
{
netmem_ref netmem = net_iov_to_netmem(niov);
@@ -1219,7 +1219,7 @@ void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
/* Disassociate a niov from a page pool. Should only be used in the
* ->release_netmem() path.
*/
-void net_mp_niov_clear_page_pool(struct net_iov *niov)
+void net_mp_niov_clear_page_pool(struct netmem_desc *niov)
{
netmem_ref netmem = net_iov_to_netmem(niov);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6edc441b37023..76199c832ef82 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2483,7 +2483,7 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
*/
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct net_iov *niov;
+ struct netmem_desc *niov;
u64 frag_offset;
int end;
--
2.17.1
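The include/net/netmem.h hunk above leans on netmem_ref's low-bit tagging: a
netmem_ref either carries a struct page pointer (bit 0 clear) or a
net_iov/netmem_desc pointer with bit 0 set to NET_IOV. Below is a small,
self-contained userspace sketch of that encoding only; the netmem_desc type,
its field, and the netmem_ref typedef are simplified stand-ins, not the
kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in; only the pointer tagging matters here. */
struct netmem_desc { unsigned long pp_magic; };

typedef uintptr_t netmem_ref;
#define NET_IOV 0x01UL

/* Encode: set the low bit so the ref is recognisably not a struct page.
 * This relies on the descriptor being at least 2-byte aligned. */
static netmem_ref net_iov_to_netmem(struct netmem_desc *desc)
{
	return (netmem_ref)desc | NET_IOV;
}

static int netmem_is_net_iov(netmem_ref netmem)
{
	return netmem & NET_IOV;
}

/* Decode: clear the low bit to recover the original pointer. */
static struct netmem_desc *netmem_to_net_iov(netmem_ref netmem)
{
	return netmem_is_net_iov(netmem)
		? (struct netmem_desc *)(netmem & ~(netmem_ref)NET_IOV)
		: NULL;
}

int main(void)
{
	static struct netmem_desc desc = { .pp_magic = 0x42 };
	netmem_ref ref = net_iov_to_netmem(&desc);

	printf("tagged: %d, roundtrip ok: %d\n",
	       netmem_is_net_iov(ref), netmem_to_net_iov(ref) == &desc);
	return 0;
}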
On 5/9/25 12:51, Byungchul Park wrote:
> To simplify struct page, the page pool members of struct page should be
> moved to other, allowing these members to be removed from struct page.
>
> Reuse struct net_iov for also system memory, that already mirrored the
> page pool members.
>
> Signed-off-by: Byungchul Park <byungchul@sk.com>
> ---
> include/linux/skbuff.h | 4 +--
> include/net/netmem.h | 20 ++++++------
> include/net/page_pool/memory_provider.h | 6 ++--
> io_uring/zcrx.c | 42 ++++++++++++-------------

You're unnecessarily complicating it for yourself. It'll certainly
conflict with changes in the io_uring tree, and hence it can't
be taken normally through the net tree.

Why are you renaming it in the first place? If there are good
reasons, maybe you can try adding a temporary alias of the struct
and patch io_uring later separately.

--
Pavel Begunkov
On Mon, May 12, 2025 at 02:11:13PM +0100, Pavel Begunkov wrote:
> On 5/9/25 12:51, Byungchul Park wrote:
> > To simplify struct page, the page pool members of struct page should be
> > moved to other, allowing these members to be removed from struct page.
> >
> > Reuse struct net_iov for also system memory, that already mirrored the
> > page pool members.
> >
> > Signed-off-by: Byungchul Park <byungchul@sk.com>
> > ---
> > include/linux/skbuff.h | 4 +--
> > include/net/netmem.h | 20 ++++++------
> > include/net/page_pool/memory_provider.h | 6 ++--
> > io_uring/zcrx.c | 42 ++++++++++++-------------
>
> You're unnecessarily complicating it for yourself. It'll certainly
> conflict with changes in the io_uring tree, and hence it can't
> be taken normally through the net tree.
>
> Why are you renaming it in the first place? If there are good

It's because the struct should be used for not only io vetor things but
also system memory. Current network code uses struct page as system
memory descriptor but struct page fields for page pool will be gone.

So I had to reuse struct net_iov and I thought renaming it made more sense. It'd be welcome if you have better idea.

Byungchul

> reasons, maybe you can try adding a temporary alias of the struct
> and patch io_uring later separately.
>
> --
> Pavel Begunkov
>
On 5/12/25 14:29, Byungchul Park wrote:
> On Mon, May 12, 2025 at 02:11:13PM +0100, Pavel Begunkov wrote:
>> On 5/9/25 12:51, Byungchul Park wrote:
>>> To simplify struct page, the page pool members of struct page should be
>>> moved to other, allowing these members to be removed from struct page.
>>>
>>> Reuse struct net_iov for also system memory, that already mirrored the
>>> page pool members.
>>>
>>> Signed-off-by: Byungchul Park <byungchul@sk.com>
>>> ---
>>> include/linux/skbuff.h | 4 +--
>>> include/net/netmem.h | 20 ++++++------
>>> include/net/page_pool/memory_provider.h | 6 ++--
>>> io_uring/zcrx.c | 42 ++++++++++++-------------
>>
>> You're unnecessarily complicating it for yourself. It'll certainly
>> conflict with changes in the io_uring tree, and hence it can't
>> be taken normally through the net tree.
>>
>> Why are you renaming it in the first place? If there are good
>
> It's because the struct should be used for not only io vetor things but
> also system memory. Current network code uses struct page as system

Not sure what you mean by "io vector things", but it can already
point to system memory, and if anything, the use conceptually more
resembles struct pages rather than iovec. IOW, it's just a name,
neither gives a perfect understanding until you look up details,
so you could just leave it net_iov. Or follow what Mina suggested,
I like that option.

> memory descriptor but struct page fields for page pool will be gone.
>
> So I had to reuse struct net_iov and I thought renaming it made more sense. It'd be welcome if you have better idea.

--
Pavel Begunkov
On Tue, May 13, 2025 at 01:49:56PM +0100, Pavel Begunkov wrote:
> On 5/12/25 14:29, Byungchul Park wrote:
> > On Mon, May 12, 2025 at 02:11:13PM +0100, Pavel Begunkov wrote:
> > > On 5/9/25 12:51, Byungchul Park wrote:
> > > > To simplify struct page, the page pool members of struct page should be
> > > > moved to other, allowing these members to be removed from struct page.
> > > >
> > > > Reuse struct net_iov for also system memory, that already mirrored the
> > > > page pool members.
> > > >
> > > > Signed-off-by: Byungchul Park <byungchul@sk.com>
> > > > ---
> > > > include/linux/skbuff.h | 4 +--
> > > > include/net/netmem.h | 20 ++++++------
> > > > include/net/page_pool/memory_provider.h | 6 ++--
> > > > io_uring/zcrx.c | 42 ++++++++++++-------------
> > >
> > > You're unnecessarily complicating it for yourself. It'll certainly
> > > conflict with changes in the io_uring tree, and hence it can't
> > > be taken normally through the net tree.
> > >
> > > Why are you renaming it in the first place? If there are good
> >
> > It's because the struct should be used for not only io vetor things but
> > also system memory. Current network code uses struct page as system
>
> Not sure what you mean by "io vector things", but it can already
> point to system memory, and if anything, the use conceptually more
> resembles struct pages rather than iovec. IOW, it's just a name,
> neither gives a perfect understanding until you look up details,
> so you could just leave it net_iov. Or follow what Mina suggested,
> I like that option.

I appreciate all of your feedback and will try to apply them.

Byungchul

> > memory descriptor but struct page fields for page pool will be gone.
> >
> > So I had to reuse struct net_iov and I thought renaming it made more
> > sense. It'd be welcome if you have better idea.
> --
> Pavel Begunkov
On Mon, May 12, 2025 at 6:29 AM Byungchul Park <byungchul@sk.com> wrote:
>
> On Mon, May 12, 2025 at 02:11:13PM +0100, Pavel Begunkov wrote:
> > On 5/9/25 12:51, Byungchul Park wrote:
> > > To simplify struct page, the page pool members of struct page should be
> > > moved to other, allowing these members to be removed from struct page.
> > >
> > > Reuse struct net_iov for also system memory, that already mirrored the
> > > page pool members.
> > >
> > > Signed-off-by: Byungchul Park <byungchul@sk.com>
> > > ---
> > > include/linux/skbuff.h | 4 +--
> > > include/net/netmem.h | 20 ++++++------
> > > include/net/page_pool/memory_provider.h | 6 ++--
> > > io_uring/zcrx.c | 42 ++++++++++++-------------
> >
> > You're unnecessarily complicating it for yourself. It'll certainly
> > conflict with changes in the io_uring tree, and hence it can't
> > be taken normally through the net tree.
> >
> > Why are you renaming it in the first place? If there are good
>
> It's because the struct should be used for not only io vetor things but
> also system memory. Current network code uses struct page as system
> memory descriptor but struct page fields for page pool will be gone.
>
> So I had to reuse struct net_iov and I thought renaming it made more sense. It'd be welcome if you have better idea.
>

As I said in another thread, struct page should not embed struct
net_iov as-is. struct net_iov already has fields that are unrelated to
page (like net_iov_owner) and more will be added in the future.

I think what Matthew seems to agree with AFAIU in the other thread is
creating a new struct, struct netmem_desc, and having struct net_iov
embed netmem_desc.

--
Thanks,
Mina
On Mon, May 12, 2025 at 12:14:13PM -0700, Mina Almasry wrote:
> On Mon, May 12, 2025 at 6:29 AM Byungchul Park <byungchul@sk.com> wrote:
> >
> > On Mon, May 12, 2025 at 02:11:13PM +0100, Pavel Begunkov wrote:
> > > On 5/9/25 12:51, Byungchul Park wrote:
> > > > To simplify struct page, the page pool members of struct page should be
> > > > moved to other, allowing these members to be removed from struct page.
> > > >
> > > > Reuse struct net_iov for also system memory, that already mirrored the
> > > > page pool members.
> > > >
> > > > Signed-off-by: Byungchul Park <byungchul@sk.com>
> > > > ---
> > > > include/linux/skbuff.h | 4 +--
> > > > include/net/netmem.h | 20 ++++++------
> > > > include/net/page_pool/memory_provider.h | 6 ++--
> > > > io_uring/zcrx.c | 42 ++++++++++++-------------
> > >
> > > You're unnecessarily complicating it for yourself. It'll certainly
> > > conflict with changes in the io_uring tree, and hence it can't
> > > be taken normally through the net tree.
> > >
> > > Why are you renaming it in the first place? If there are good
> >
> > It's because the struct should be used for not only io vetor things but
> > also system memory. Current network code uses struct page as system
> > memory descriptor but struct page fields for page pool will be gone.
> >
> > So I had to reuse struct net_iov and I thought renaming it made more sense. It'd be welcome if you have better idea.
> >
>
> As I said in another thread, struct

I don't understand here. Can you explain more? Do you mean do not use
place holder?

> net_iov as-is. struct net_iov already has fields that are unrelated to
> page (like net_iov_owner) and more will be added in the future.
>
> I think what Matthew seems to agree with AFAIU in the other thread is
> creating a new struct, struct netmem_desc, and having struct net_iov
> embed netmem_desc.

This would look better. I will try.

Byungchul

> --
> Thanks,
> Mina
On 5/13/25 03:00, Byungchul Park wrote:
> On Mon, May 12, 2025 at 12:14:13PM -0700, Mina Almasry wrote:
>> On Mon, May 12, 2025 at 6:29 AM Byungchul Park <byungchul@sk.com> wrote:
>>>
>>> On Mon, May 12, 2025 at 02:11:13PM +0100, Pavel Begunkov wrote:
>>>> On 5/9/25 12:51, Byungchul Park wrote:
>>>>> To simplify struct page, the page pool members of struct page should be
>>>>> moved to other, allowing these members to be removed from struct page.
>>>>>
>>>>> Reuse struct net_iov for also system memory, that already mirrored the
>>>>> page pool members.
>>>>>
>>>>> Signed-off-by: Byungchul Park <byungchul@sk.com>
>>>>> ---
>>>>> include/linux/skbuff.h | 4 +--
>>>>> include/net/netmem.h | 20 ++++++------
>>>>> include/net/page_pool/memory_provider.h | 6 ++--
>>>>> io_uring/zcrx.c | 42 ++++++++++++-------------
>>>>
>>>> You're unnecessarily complicating it for yourself. It'll certainly
>>>> conflict with changes in the io_uring tree, and hence it can't
>>>> be taken normally through the net tree.
>>>>
>>>> Why are you renaming it in the first place? If there are good
>>>
>>> It's because the struct should be used for not only io vetor things but
>>> also system memory. Current network code uses struct page as system
>>> memory descriptor but struct page fields for page pool will be gone.
>>>
>>> So I had to reuse struct net_iov and I thought renaming it made more
>>> sense. It'd be welcome if you have better idea.
>>>
>>
>> As I said in another thread, struct page should not embed struct
>
> I don't understand here. Can you explain more? Do you mean do not use
> place holder?
I assume this:
struct netmem_desc {
	...
};

struct net_iov {
	netmem_desc desc;
};

struct page {
	union {
		// eventually will go away
		netmem_desc desc;
		...
	};
};
And to avoid conflicts with io_uring, you can send something like the
following to the net tree, and finish the io_uring conversion later.
struct net_iov {
	struct_group_tagged(netmem_desc, desc,
		...
	);
};
--
Pavel Begunkov
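For reference, the kernel's struct_group_tagged() helper wraps the listed
members in a union of an anonymous struct and a tagged, named struct, so the
same storage is reachable both directly and through the new name. The sketch
below hand-expands that layout in plain C so it can be compiled standalone;
the field list is illustrative, not the exact kernel layout.

#include <stddef.h>
#include <stdio.h>

struct net_iov {
	union {
		/* anonymous view: existing net_iov users keep working */
		struct {
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long dma_addr;
			long pp_ref_count;
		};
		/* tagged view: new code can pass a netmem_desc around */
		struct netmem_desc {
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long dma_addr;
			long pp_ref_count;
		} desc;
	};
};

int main(void)
{
	struct net_iov niov = { .desc = { .pp_magic = 0x42 } };

	/* Both views alias the same storage and keep the same offsets. */
	printf("direct: %#lx, via desc: %#lx\n",
	       niov.pp_magic, niov.desc.pp_magic);
	printf("offsets match: %d\n",
	       offsetof(struct net_iov, pp_magic) ==
	       offsetof(struct net_iov, desc.pp_magic));
	return 0;
}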