Instead of reading the requested rx queues after binding the buffer,
read the requested rx queues into a bitmap in advance and iterate
over it when needed.

This is in preparation for fetching the DMA device for each queue.
This patch has no functional changes.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
---
net/core/netdev-genl.c | 84 +++++++++++++++++++++++++++++++-----------
1 file changed, 57 insertions(+), 27 deletions(-)
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 3e2d6aa6e060..3e990f100bf0 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -869,17 +869,55 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
return err;
}
-int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
+				     unsigned int rxq_bitmap_len,
+				     unsigned long *rxq_bitmap)
{
struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct nlattr *attr;
+ int rem, err = 0;
+ u32 rxq_idx;
+
+ nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
+ genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ err = nla_parse_nested(
+ tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE))
+ return -EINVAL;
+
+ if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
+ return -EINVAL;
+ }
+
+		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+		if (rxq_idx >= rxq_bitmap_len) {
+			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_ID]);
+			return -EINVAL;
+		}
+
+		bitmap_set(rxq_bitmap, rxq_idx, 1);
+ }
+
+ return 0;
+}
+
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+{
struct net_devmem_dmabuf_binding *binding;
u32 ifindex, dmabuf_fd, rxq_idx;
struct netdev_nl_sock *priv;
struct net_device *netdev;
+ unsigned long *rxq_bitmap;
struct device *dma_dev;
struct sk_buff *rsp;
- struct nlattr *attr;
- int rem, err = 0;
+ int err = 0;
void *hdr;
if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
@@ -922,37 +960,25 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
}
+	rxq_bitmap = bitmap_zalloc(netdev->num_rx_queues, GFP_KERNEL);
+	if (!rxq_bitmap) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+	err = netdev_nl_read_rxq_bitmap(info, netdev->num_rx_queues,
+					rxq_bitmap);
+	if (err)
+		goto err_rxq_bitmap;
+
dma_dev = netdev_queue_get_dma_dev(netdev, 0);
binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
dmabuf_fd, priv, info->extack);
if (IS_ERR(binding)) {
err = PTR_ERR(binding);
- goto err_unlock;
+ goto err_rxq_bitmap;
}
- nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
- genlmsg_data(info->genlhdr),
- genlmsg_len(info->genlhdr), rem) {
- err = nla_parse_nested(
- tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
- netdev_queue_id_nl_policy, info->extack);
- if (err < 0)
- goto err_unbind;
-
- if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
- NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
- err = -EINVAL;
- goto err_unbind;
- }
-
- if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
- NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
- err = -EINVAL;
- goto err_unbind;
- }
-
- rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
-
+ for_each_set_bit(rxq_idx, rxq_bitmap, netdev->num_rx_queues) {
err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
info->extack);
if (err)
@@ -966,6 +992,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_unbind;
+ bitmap_free(rxq_bitmap);
+
netdev_unlock(netdev);
mutex_unlock(&priv->lock);
@@ -974,6 +1002,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
err_unbind:
net_devmem_unbind_dmabuf(binding);
+err_rxq_bitmap:
+ bitmap_free(rxq_bitmap);
err_unlock:
netdev_unlock(netdev);
err_unlock_sock:
--
2.50.1
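
The pattern the patch adopts is worth spelling out: collect and validate
every requested queue id into a bitmap first, then act on the queues in a
second pass, so a late validation failure never leaves some queues already
bound. Below is a minimal userspace sketch of the same two-phase idea; the
helper names and queue counts are illustrative, not kernel API (the kernel
version uses the helpers from <linux/bitmap.h> instead).

/* Two-phase pattern: phase 1 validates and records requested queue ids
 * in a bitmap, phase 2 acts on them. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG   (CHAR_BIT * sizeof(unsigned long))
#define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void bitmap_set_bit(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int bitmap_test_bit(const unsigned long *map, unsigned int bit)
{
	return !!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
}

int main(void)
{
	const unsigned int num_rx_queues = 128;	/* > 64 on purpose */
	const unsigned int requested[] = { 0, 5, 100 };
	unsigned long *rxq_bitmap;
	unsigned int i;

	/* calloc() zeroes the map, like bitmap_zalloc() in the kernel. */
	rxq_bitmap = calloc(BITMAP_LONGS(num_rx_queues), sizeof(*rxq_bitmap));
	if (!rxq_bitmap)
		return 1;

	/* Phase 1: validate every id before touching any queue. */
	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		if (requested[i] >= num_rx_queues) {
			free(rxq_bitmap);
			return 1;
		}
		bitmap_set_bit(rxq_bitmap, requested[i]);
	}

	/* Phase 2: act on the recorded queues. */
	for (i = 0; i < num_rx_queues; i++)
		if (bitmap_test_bit(rxq_bitmap, i))
			printf("binding rx queue %u\n", i);

	free(rxq_bitmap);
	return 0;
}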
On Fri, Aug 15, 2025 at 4:07 AM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>
> Instead of reading the requested rx queues after binding the buffer,
> read the requested rx queues into a bitmap in advance and iterate
> over it when needed.
>
[...]
> +	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->num_rx_queues) {

Is this code assuming that netdev->num_rx_queues (or
real_num_rx_queues) <= BITS_PER_LONG? Aren't there devices out there
that support more than 64 hardware queues? If so, I guess you need a
different data structure than a bitmap (or maybe there is an
arbitrary-sized bitmap library somewhere to use).

--
Thanks,
Mina
On Fri, Aug 15, 2025 at 11:05:56AM -0700, Mina Almasry wrote:
> On Fri, Aug 15, 2025 at 4:07 AM Dragos Tatulea <dtatulea@nvidia.com> wrote:
[...]
> > +	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->num_rx_queues) {
>
> Is this code assuming that netdev->num_rx_queues (or
> real_num_rx_queues) <= BITS_PER_LONG? Aren't there devices out there
> that support more than 64 hardware queues? If so, I guess you need a
> different data structure than a bitmap (or maybe there is an
> arbitrary-sized bitmap library somewhere to use).
>
The bitmap API can handle any number of bits. Can it not?

Thanks,
Dragos
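
That is correct as far as the bitmap API goes: bitmap_zalloc() is sized in
bits and rounds up to as many unsigned longs as needed, so widths beyond
BITS_PER_LONG are fine. A kernel-style sketch of the point (it compiles
only in a kernel tree; the function name and the 512-bit width are
arbitrary choices for illustration):

#include <linux/bitmap.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static int bitmap_width_demo(void)
{
	unsigned long *map;
	unsigned int bit;

	/* 512 bits -> eight unsigned longs on a 64-bit build. */
	map = bitmap_zalloc(512, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	bitmap_set(map, 100, 1);	/* well past bit 63 */
	bitmap_set(map, 500, 1);

	for_each_set_bit(bit, map, 512)
		pr_info("bit %u set\n", bit);	/* prints 100 and 500 */

	bitmap_free(map);
	return 0;
}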
On Sat, Aug 16, 2025 at 1:59 AM Dragos Tatulea <dtatulea@nvidia.com> wrote:
>
> On Fri, Aug 15, 2025 at 11:05:56AM -0700, Mina Almasry wrote:
> > Is this code assuming that netdev->num_rx_queues (or
> > real_num_rx_queues) <= BITS_PER_LONG? Aren't there devices out there
> > that support more than 64 hardware queues?
> >
> The bitmap API can handle any number of bits. Can it not?
>

Ah, yes, I missed this:

+	rxq_bitmap = bitmap_zalloc(netdev->num_rx_queues, GFP_KERNEL);

Which allocates netdev->num_rx_queues bits. I was confused by the fact
that rxq_bitmap is an unsigned long * and thought that was the # of
bits in the map.

This patch should be good just with conversion to
netdev->real_num_rx_queues.

--
Thanks,
Mina
On Fri, 15 Aug 2025 14:03:47 +0300 Dragos Tatulea wrote:
> Instead of reading the requested rx queues after binding the buffer,
> read the requested rx queues into a bitmap in advance and iterate
> over it when needed.
>
> This is in preparation for fetching the DMA device for each queue.
>
> This patch has no functional changes.

Nice!

> +	rxq_bitmap = bitmap_zalloc(netdev->num_rx_queues, GFP_KERNEL);

FWIW I think you can use real_num_rx_queues since we're holding the
instance lock which prevents it from changing? But it's just bits so
it doesn't matter all that much.
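
Putting the two review comments together, the suggested conversion would
presumably look something like this on top of the patch; a sketch only,
assuming a v2 keeps the same structure and that the netdev instance lock
is held across all three uses so real_num_rx_queues cannot change:

@@ netdev_nl_bind_rx_doit @@
-	rxq_bitmap = bitmap_zalloc(netdev->num_rx_queues, GFP_KERNEL);
+	rxq_bitmap = bitmap_zalloc(netdev->real_num_rx_queues, GFP_KERNEL);
 ...
-	err = netdev_nl_read_rxq_bitmap(info, netdev->num_rx_queues,
+	err = netdev_nl_read_rxq_bitmap(info, netdev->real_num_rx_queues,
 					rxq_bitmap);
 ...
-	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->num_rx_queues) {
+	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {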