Convert all the legacy code that directly accesses the pp fields in
net_iov so that it accesses them through @desc instead.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
Changes from v1:
1. Drop 1/3 since it has already been handled in the io-uring tree.
2. Drop 3/3 since it requires the io-uring change to be merged first.
---
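Note for reviewers: a rough sketch of the net_iov layout this conversion
assumes, with the page_pool state reached through an embedded @desc member
rather than directly. The definitions below are illustrative only; the
member types are inferred from how the fields are used in the hunks
(pp_magic cleared to 0, pp set to NULL, pp_ref_count manipulated with
atomic_long_*, dma_addr added to byte offsets) and are not the actual
kernel definitions.

	/* Illustrative sketch only -- not the real kernel definitions. */
	struct netmem_desc {
		unsigned long		pp_magic;
		struct page_pool	*pp;
		atomic_long_t		pp_ref_count;
		dma_addr_t		dma_addr;
	};

	struct net_iov {
		struct netmem_desc	desc;	/* pp state now lives behind @desc */
		/* ... remaining net_iov members left untouched ... */
	};

With this layout, a caller that used to do

	atomic_long_inc(&niov->pp_ref_count);

now goes through @desc, as in the tcp.c hunk below:

	atomic_long_inc(&niov->desc.pp_ref_count);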
include/linux/skbuff.h | 4 ++--
net/core/devmem.c | 6 +++---
net/ipv4/tcp.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ff90281ddf90..86737076101d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3778,8 +3778,8 @@ static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
enum dma_data_direction dir)
{
if (skb_frag_is_net_iov(frag)) {
- return netmem_to_net_iov(frag->netmem)->dma_addr + offset +
- frag->offset;
+ return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
+ offset + frag->offset;
}
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 1d04754bc756..ec4217d6c0b4 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -97,9 +97,9 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
index = offset / PAGE_SIZE;
niov = &owner->area.niovs[index];

- niov->pp_magic = 0;
- niov->pp = NULL;
- atomic_long_set(&niov->pp_ref_count, 0);
+ niov->desc.pp_magic = 0;
+ niov->desc.pp = NULL;
+ atomic_long_set(&niov->desc.pp_ref_count, 0);

return niov;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dee578aad690..f035440c475a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2587,7 +2587,7 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
if (err)
goto out;

- atomic_long_inc(&niov->pp_ref_count);
+ atomic_long_inc(&niov->desc.pp_ref_count);
tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);

sent += copy;
--
2.17.1