Use netif_queue_set_napi to map NAPIs to queue IDs so that the mapping
can be accessed by user apps.
$ ethtool -i ens4 | grep driver
driver: virtio_net
$ sudo ethtool -L ens4 combined 4
$ ./tools/net/ynl/pyynl/cli.py \
--spec Documentation/netlink/specs/netdev.yaml \
--dump queue-get --json='{"ifindex": 2}'
[{'id': 0, 'ifindex': 2, 'napi-id': 8289, 'type': 'rx'},
{'id': 1, 'ifindex': 2, 'napi-id': 8290, 'type': 'rx'},
{'id': 2, 'ifindex': 2, 'napi-id': 8291, 'type': 'rx'},
{'id': 3, 'ifindex': 2, 'napi-id': 8292, 'type': 'rx'},
{'id': 0, 'ifindex': 2, 'type': 'tx'},
{'id': 1, 'ifindex': 2, 'type': 'tx'},
{'id': 2, 'ifindex': 2, 'type': 'tx'},
{'id': 3, 'ifindex': 2, 'type': 'tx'}]
Note that virtio_net has TX-only NAPIs which do not have NAPI IDs, so
the lack of 'napi-id' in the above output is expected.
Signed-off-by: Joe Damato <jdamato@fastly.com>
---
v2:
- Eliminate RTNL code paths using the API Jakub introduced in patch 1
of this v2.
- Added virtnet_napi_disable to reduce code duplication as
suggested by Jason Wang.
drivers/net/virtio_net.c | 34 +++++++++++++++++++++++++++++-----
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cff18c66b54a..c6fda756dd07 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
local_bh_enable();
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int q = vq2rxq(vq);
+ u16 curr_qs;
+
virtnet_napi_do_enable(vq, napi);
+
+ curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
+ if (!vi->xdp_enabled || q < curr_qs)
+ netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
}
static void virtnet_napi_tx_enable(struct virtnet_info *vi,
@@ -2826,6 +2835,20 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
virtnet_napi_do_enable(vq, napi);
}
+static void virtnet_napi_disable(struct virtqueue *vq,
+ struct napi_struct *napi)
+{
+ struct virtnet_info *vi = vq->vdev->priv;
+ int q = vq2rxq(vq);
+ u16 curr_qs;
+
+ curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
+ if (!vi->xdp_enabled || q < curr_qs)
+ netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, NULL);
+
+ napi_disable(napi);
+}
+
static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
if (napi->weight)
@@ -2842,7 +2865,8 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq->vq, &rq->napi);
+
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
virtnet_napi_enable(rq->vq, &rq->napi);
@@ -3042,7 +3066,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_disable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3313,7 +3337,7 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq->vq, &rq->napi);
virtnet_cancel_dim(vi, &rq->dim);
}
}
@@ -5932,7 +5956,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
+ virtnet_napi_disable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
}
--
2.25.1
On Thu, 16 Jan 2025 05:52:58 +0000, Joe Damato <jdamato@fastly.com> wrote:
> @@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
> local_bh_enable();
> }
>
> -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> +static void virtnet_napi_enable(struct virtqueue *vq,
> + struct napi_struct *napi)
> {
> + struct virtnet_info *vi = vq->vdev->priv;
> + int q = vq2rxq(vq);
> + u16 curr_qs;
> +
> virtnet_napi_do_enable(vq, napi);
> +
> + curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
> + if (!vi->xdp_enabled || q < curr_qs)
> + netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
So what case is the check of xdp_enabled for?
And I think we should merge this into the last commit.
Thanks.
On Thu, Jan 16, 2025 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Thu, 16 Jan 2025 05:52:58 +0000, Joe Damato <jdamato@fastly.com> wrote:
> > @@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
> > local_bh_enable();
> > }
> >
> > -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> > +static void virtnet_napi_enable(struct virtqueue *vq,
> > + struct napi_struct *napi)
> > {
> > + struct virtnet_info *vi = vq->vdev->priv;
> > + int q = vq2rxq(vq);
> > + u16 curr_qs;
> > +
> > virtnet_napi_do_enable(vq, napi);
> > +
> > + curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
> > + if (!vi->xdp_enabled || q < curr_qs)
> > + netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
>
> So what case is the check of xdp_enabled for?
+1, and I think the XDP-related checks should be done by the caller, not here.
>
> And I think we should merge this into the last commit.
>
> Thanks.
>
Thanks
On Mon, Jan 20, 2025 at 09:58:13AM +0800, Jason Wang wrote:
> On Thu, Jan 16, 2025 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Thu, 16 Jan 2025 05:52:58 +0000, Joe Damato <jdamato@fastly.com> wrote:
> > > @@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
> > > local_bh_enable();
> > > }
> > >
> > > -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> > > +static void virtnet_napi_enable(struct virtqueue *vq,
> > > + struct napi_struct *napi)
> > > {
> > > + struct virtnet_info *vi = vq->vdev->priv;
> > > + int q = vq2rxq(vq);
> > > + u16 curr_qs;
> > > +
> > > virtnet_napi_do_enable(vq, napi);
> > > +
> > > + curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
> > > + if (!vi->xdp_enabled || q < curr_qs)
> > > + netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
> >
> > So what case is the check of xdp_enabled for?
>
> +1, and I think the XDP-related checks should be done by the caller, not here.
Based on the reply further down in the thread, it seems that these
queues should be mapped regardless of whether an XDP program is
attached or not, IIUC.
Feel free to reply there, if you disagree/have comments.
> >
> > And I think we should merge this into the last commit.
> >
> > Thanks.
> >
>
> Thanks
FWIW, I don't plan to merge the commits, due to the reason mentioned
further down in the thread.
Thanks.
On Thu, Jan 16, 2025 at 03:53:14PM +0800, Xuan Zhuo wrote:
> On Thu, 16 Jan 2025 05:52:58 +0000, Joe Damato <jdamato@fastly.com> wrote:
> > @@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
> > local_bh_enable();
> > }
> >
> > -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> > +static void virtnet_napi_enable(struct virtqueue *vq,
> > + struct napi_struct *napi)
> > {
> > + struct virtnet_info *vi = vq->vdev->priv;
> > + int q = vq2rxq(vq);
> > + u16 curr_qs;
> > +
> > virtnet_napi_do_enable(vq, napi);
> > +
> > + curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
> > + if (!vi->xdp_enabled || q < curr_qs)
> > + netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
>
> So what case is the check of xdp_enabled for?
Based on a previous discussion [1], the NAPIs should not be linked
for in-kernel XDP, but they _should_ be linked for XSK.
I could certainly have misread the virtio_net code (please let me
know if I've gotten it wrong, I'm not an expert), but the three
cases I have in mind are:
- vi->xdp_enabled = false, which happens when no XDP is being
used, so the queue number will be < vi->curr_queue_pairs.
- vi->xdp_enabled = false, which I believe is what happens in the
XSK case. In this case, the NAPI is linked.
- vi->xdp_enabled = true, which I believe only happens for
in-kernel XDP - but not XSK - and in this case, the NAPI should
NOT be linked.
Thank you for your review and questions about this, I definitely
want to make sure I've gotten it right :)
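Spelled out against the check in the hunk quoted above, that amounts to
roughly the following (illustration only; the helper name is made up
here, and vi/q mirror the variables in virtnet_napi_enable()):

/* Illustration only, not part of the patch. */
static bool virtnet_rx_napi_should_link(const struct virtnet_info *vi, int q)
{
	u16 curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;

	/* First two cases above (no XDP, or XSK): xdp_enabled is false,
	 * so the first clause is true and the NAPI is always linked.
	 * Third case (in-kernel XDP): xdp_enabled is true, so the NAPI
	 * is linked only for queues below curr_qs.
	 */
	return !vi->xdp_enabled || q < curr_qs;
}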
> And I think we should merge this into the last commit.
I kept them separate for two reasons:
1. Easier to review :)
2. If a bug were to appear, it'll be easier to bisect the code to
determine whether the bug is caused by linking the queues to
NAPIs or by adding support for persistent NAPI config
parameters.
Having the two features separated makes it easier to understand and
fix, as there have been minor bugs in other drivers with NAPI config
[2].
[1]: https://lore.kernel.org/netdev/20250113135609.13883897@kernel.org/
[2]: https://lore.kernel.org/lkml/38d019dd-b876-4fc1-ba7e-f1eb85ad7360@nvidia.com/
On 16.01.25 17:09, Joe Damato wrote:
> On Thu, Jan 16, 2025 at 03:53:14PM +0800, Xuan Zhuo wrote:
>> On Thu, 16 Jan 2025 05:52:58 +0000, Joe Damato <jdamato@fastly.com> wrote:
>>> @@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
>>> local_bh_enable();
>>> }
>>>
>>> -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
>>> +static void virtnet_napi_enable(struct virtqueue *vq,
>>> + struct napi_struct *napi)
>>> {
>>> + struct virtnet_info *vi = vq->vdev->priv;
>>> + int q = vq2rxq(vq);
>>> + u16 curr_qs;
>>> +
>>> virtnet_napi_do_enable(vq, napi);
>>> +
>>> + curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
>>> + if (!vi->xdp_enabled || q < curr_qs)
>>> + netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
>>
>> So what case is the check of xdp_enabled for?
>
> Based on a previous discussion [1], the NAPIs should not be linked
> for in-kernel XDP, but they _should_ be linked for XSK.
>
> I could certainly have misread the virtio_net code (please let me
> know if I've gotten it wrong, I'm not an expert), but the three
> cases I have in mind are:
>
> - vi->xdp_enabled = false, which happens when no XDP is being
> used, so the queue number will be < vi->curr_queue_pairs.
>
> - vi->xdp_enabled = false, which I believe is what happens in the
> XSK case. In this case, the NAPI is linked.
>
> - vi->xdp_enabled = true, which I believe only happens for
> in-kernel XDP - but not XSK - and in this case, the NAPI should
> NOT be linked.
My interpretation based on [1] is that an in-kernel XDP Tx queue is a
queue that is only used if XDP is attached and is not visible to
userspace. The in-kernel XDP Tx queue is used to avoid loading stack Tx
queues with XDP packets. IIRC fbnic has additional queues only for
XDP Tx. So for stack RX queues I would always link the napi, no matter
whether XDP is attached or not. I think most drivers do not have
in-kernel XDP Tx queues. But I'm also not an expert.
Gerhard
On Thu, Jan 16, 2025 at 09:28:07PM +0100, Gerhard Engleder wrote:
> On 16.01.25 17:09, Joe Damato wrote:
> > On Thu, Jan 16, 2025 at 03:53:14PM +0800, Xuan Zhuo wrote:
> > > On Thu, 16 Jan 2025 05:52:58 +0000, Joe Damato <jdamato@fastly.com> wrote:
> > > > @@ -2803,9 +2803,18 @@ static void virtnet_napi_do_enable(struct virtqueue *vq,
> > > > local_bh_enable();
> > > > }
> > > >
> > > > -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> > > > +static void virtnet_napi_enable(struct virtqueue *vq,
> > > > + struct napi_struct *napi)
> > > > {
> > > > + struct virtnet_info *vi = vq->vdev->priv;
> > > > + int q = vq2rxq(vq);
> > > > + u16 curr_qs;
> > > > +
> > > > virtnet_napi_do_enable(vq, napi);
> > > > +
> > > > + curr_qs = vi->curr_queue_pairs - vi->xdp_queue_pairs;
> > > > + if (!vi->xdp_enabled || q < curr_qs)
> > > > + netif_queue_set_napi(vi->dev, q, NETDEV_QUEUE_TYPE_RX, napi);
> > >
> > > So what case is the check of xdp_enabled for?
> >
> > Based on a previous discussion [1], the NAPIs should not be linked
> > for in-kernel XDP, but they _should_ be linked for XSK.
> >
> > I could certainly have misread the virtio_net code (please let me
> > know if I've gotten it wrong, I'm not an expert), but the three
> > cases I have in mind are:
> >
> > - vi->xdp_enabled = false, which happens when no XDP is being
> > used, so the queue number will be < vi->curr_queue_pairs.
> >
> > - vi->xdp_enabled = false, which I believe is what happens in the
> > XSK case. In this case, the NAPI is linked.
> >
> > - vi->xdp_enabled = true, which I believe only happens for
> > in-kernel XDP - but not XSK - and in this case, the NAPI should
> > NOT be linked.
>
> My interpretation based on [1] is that an in-kernel XDP Tx queue is a
> queue that is only used if XDP is attached and is not visible to
> userspace. The in-kernel XDP Tx queue is used to avoid loading stack Tx
> queues with XDP packets. IIRC fbnic has additional queues only for
> XDP Tx. So for stack RX queues I would always link the napi, no matter
> whether XDP is attached or not. I think most drivers do not have
> in-kernel XDP Tx queues. But I'm also not an expert.
I think you are probably right, so I'll send an RFC (since net-next
is now closed) with a change as you've suggested after I test it.
In this case, it'll be simply removing the if statement altogether
and mapping the NAPIs to queues.
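Concretely, the enable path would end up looking roughly like this
(just a sketch against the v2 hunk above, not the actual RFC;
virtnet_napi_disable() would drop its check the same way):

static void virtnet_napi_enable(struct virtqueue *vq,
				struct napi_struct *napi)
{
	struct virtnet_info *vi = vq->vdev->priv;

	virtnet_napi_do_enable(vq, napi);
	/* Always link the RX queue to its NAPI, whether XDP is attached or not. */
	netif_queue_set_napi(vi->dev, vq2rxq(vq), NETDEV_QUEUE_TYPE_RX, napi);
}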