Those variants are used internally, so let's switch to using
vring_virtqueue as the parameter to be consistent with other internal
virtqueue helpers.
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/virtio/virtio_ring.c | 40 +++++++++++++++++-------------------
1 file changed, 19 insertions(+), 21 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index aadeab66e57c..93c36314b5e7 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -476,7 +476,7 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
return extra->next;
}
-static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
+static struct vring_desc *alloc_indirect_split(struct vring_virtqueue *vq,
unsigned int total_sg,
gfp_t gfp)
{
@@ -505,7 +505,7 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
return desc;
}
-static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
+static inline unsigned int virtqueue_add_desc_split(struct vring_virtqueue *vq,
struct vring_desc *desc,
struct vring_desc_extra *extra,
unsigned int i,
@@ -513,11 +513,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
unsigned int len,
u16 flags, bool premapped)
{
+ struct virtio_device *vdev = vq->vq.vdev;
u16 next;
- desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
- desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
- desc[i].len = cpu_to_virtio32(vq->vdev, len);
+ desc[i].flags = cpu_to_virtio16(vdev, flags);
+ desc[i].addr = cpu_to_virtio64(vdev, addr);
+ desc[i].len = cpu_to_virtio32(vdev, len);
extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
extra[i].len = len;
@@ -525,12 +526,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
next = extra[i].next;
- desc[i].next = cpu_to_virtio16(vq->vdev, next);
+ desc[i].next = cpu_to_virtio16(vdev, next);
return next;
}
-static inline int virtqueue_add_split(struct virtqueue *_vq,
+static inline int virtqueue_add_split(struct vring_virtqueue *vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
@@ -540,7 +541,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
bool premapped,
gfp_t gfp)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
struct vring_desc_extra *extra;
struct scatterlist *sg;
struct vring_desc *desc;
@@ -565,7 +565,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
head = vq->free_head;
if (virtqueue_use_indirect(vq, total_sg))
- desc = alloc_indirect_split(_vq, total_sg, gfp);
+ desc = alloc_indirect_split(vq, total_sg, gfp);
else {
desc = NULL;
WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
@@ -612,7 +612,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
+ i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
VRING_DESC_F_NEXT,
premapped);
}
@@ -629,14 +629,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Note that we trust indirect descriptor
* table since it use stream DMA mapping.
*/
- i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
+ i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
premapped);
}
}
/* Last one doesn't continue. */
- desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
+ desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
@@ -649,7 +649,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
if (vring_mapping_error(vq, addr))
goto unmap_release;
- virtqueue_add_desc_split(_vq, vq->split.vring.desc,
+ virtqueue_add_desc_split(vq, vq->split.vring.desc,
vq->split.desc_extra,
head, addr,
total_sg * sizeof(struct vring_desc),
@@ -675,13 +675,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
- vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
+ vq->split.vring.avail->ring[avail] = cpu_to_virtio16(vq->vq.vdev, head);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb(vq->weak_barriers);
vq->split.avail_idx_shadow++;
- vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
+ vq->split.vring.avail->idx = cpu_to_virtio16(vq->vq.vdev,
vq->split.avail_idx_shadow);
vq->num_added++;
@@ -691,7 +691,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* This is very unlikely, but theoretically possible. Kick
* just in case. */
if (unlikely(vq->num_added == (1 << 16) - 1))
- virtqueue_kick(_vq);
+ virtqueue_kick(&vq->vq);
return 0;
@@ -706,7 +706,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
-
i = vring_unmap_one_split(vq, &extra[i]);
}
@@ -1440,7 +1439,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
return -ENOMEM;
}
-static inline int virtqueue_add_packed(struct virtqueue *_vq,
+static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
@@ -1450,7 +1449,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
bool premapped,
gfp_t gfp)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
struct vring_packed_desc *desc;
struct scatterlist *sg;
unsigned int i, n, c, descs_used, err_idx, len;
@@ -2262,9 +2260,9 @@ static inline int virtqueue_add(struct virtqueue *_vq,
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
+ return vq->packed_ring ? virtqueue_add_packed(vq, sgs, total_sg,
out_sgs, in_sgs, data, ctx, premapped, gfp) :
- virtqueue_add_split(_vq, sgs, total_sg,
+ virtqueue_add_split(vq, sgs, total_sg,
out_sgs, in_sgs, data, ctx, premapped, gfp);
}
--
2.31.1
On Fri, Sep 19, 2025 at 03:31:41PM +0800, Jason Wang wrote: > Those variants are used internally so let's switch to use > vring_virtqueue as parameter to be consistent with other internal > virtqueue helpers. > > Acked-by: Eugenio Pérez <eperezma@redhat.com> > Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> > Signed-off-by: Jason Wang <jasowang@redhat.com> > --- > drivers/virtio/virtio_ring.c | 40 +++++++++++++++++------------------- > 1 file changed, 19 insertions(+), 21 deletions(-) > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c > index aadeab66e57c..93c36314b5e7 100644 > --- a/drivers/virtio/virtio_ring.c > +++ b/drivers/virtio/virtio_ring.c > @@ -476,7 +476,7 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, > return extra->next; > } > > -static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, > +static struct vring_desc *alloc_indirect_split(struct vring_virtqueue *vq, > unsigned int total_sg, > gfp_t gfp) > { > @@ -505,7 +505,7 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, > return desc; > } > > -static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > +static inline unsigned int virtqueue_add_desc_split(struct vring_virtqueue *vq, > struct vring_desc *desc, > struct vring_desc_extra *extra, > unsigned int i, > @@ -513,11 +513,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > unsigned int len, > u16 flags, bool premapped) > { > + struct virtio_device *vdev = vq->vq.vdev; > u16 next; > > - desc[i].flags = cpu_to_virtio16(vq->vdev, flags); > - desc[i].addr = cpu_to_virtio64(vq->vdev, addr); > - desc[i].len = cpu_to_virtio32(vq->vdev, len); > + desc[i].flags = cpu_to_virtio16(vdev, flags); > + desc[i].addr = cpu_to_virtio64(vdev, addr); > + desc[i].len = cpu_to_virtio32(vdev, len); > > extra[i].addr = premapped ? 
DMA_MAPPING_ERROR : addr; > extra[i].len = len; > @@ -525,12 +526,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > > next = extra[i].next; > > - desc[i].next = cpu_to_virtio16(vq->vdev, next); > + desc[i].next = cpu_to_virtio16(vdev, next); > > return next; > } > > -static inline int virtqueue_add_split(struct virtqueue *_vq, > +static inline int virtqueue_add_split(struct vring_virtqueue *vq, > struct scatterlist *sgs[], > unsigned int total_sg, > unsigned int out_sgs, > @@ -540,7 +541,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > bool premapped, > gfp_t gfp) > { > - struct vring_virtqueue *vq = to_vvq(_vq); > struct vring_desc_extra *extra; > struct scatterlist *sg; > struct vring_desc *desc; > @@ -565,7 +565,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > head = vq->free_head; > > if (virtqueue_use_indirect(vq, total_sg)) > - desc = alloc_indirect_split(_vq, total_sg, gfp); > + desc = alloc_indirect_split(vq, total_sg, gfp); > else { > desc = NULL; > WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); > @@ -612,7 +612,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > /* Note that we trust indirect descriptor > * table since it use stream DMA mapping. > */ > - i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, > + i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len, > VRING_DESC_F_NEXT, > premapped); > } > @@ -629,14 +629,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > /* Note that we trust indirect descriptor > * table since it use stream DMA mapping. > */ > - i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, > + i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len, > VRING_DESC_F_NEXT | > VRING_DESC_F_WRITE, > premapped); > } > } > /* Last one doesn't continue. 
*/ > - desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); > + desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT); > if (!indirect && vring_need_unmap_buffer(vq, &extra[prev])) > vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= > ~VRING_DESC_F_NEXT; > @@ -649,7 +649,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > if (vring_mapping_error(vq, addr)) > goto unmap_release; > > - virtqueue_add_desc_split(_vq, vq->split.vring.desc, > + virtqueue_add_desc_split(vq, vq->split.vring.desc, > vq->split.desc_extra, > head, addr, > total_sg * sizeof(struct vring_desc), > @@ -675,13 +675,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > /* Put entry in available array (but don't update avail->idx until they > * do sync). */ > avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); > - vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); > + vq->split.vring.avail->ring[avail] = cpu_to_virtio16(vq->vq.vdev, head); > > /* Descriptors and available array need to be set before we expose the > * new available array entries. */ > virtio_wmb(vq->weak_barriers); > vq->split.avail_idx_shadow++; > - vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, > + vq->split.vring.avail->idx = cpu_to_virtio16(vq->vq.vdev, > vq->split.avail_idx_shadow); > vq->num_added++; > > @@ -691,7 +691,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > /* This is very unlikely, but theoretically possible. Kick > * just in case. */ > if (unlikely(vq->num_added == (1 << 16) - 1)) > - virtqueue_kick(_vq); > + virtqueue_kick(&vq->vq); > > return 0; > > @@ -706,7 +706,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > for (n = 0; n < total_sg; n++) { > if (i == err_idx) > break; > - > i = vring_unmap_one_split(vq, &extra[i]); > } > can't say I like this, error handling is better separated visually from good path. 
> @@ -1440,7 +1439,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, > return -ENOMEM; > } > > -static inline int virtqueue_add_packed(struct virtqueue *_vq, > +static inline int virtqueue_add_packed(struct vring_virtqueue *vq, > struct scatterlist *sgs[], > unsigned int total_sg, > unsigned int out_sgs, > @@ -1450,7 +1449,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, > bool premapped, > gfp_t gfp) > { > - struct vring_virtqueue *vq = to_vvq(_vq); > struct vring_packed_desc *desc; > struct scatterlist *sg; > unsigned int i, n, c, descs_used, err_idx, len; > @@ -2262,9 +2260,9 @@ static inline int virtqueue_add(struct virtqueue *_vq, > { > struct vring_virtqueue *vq = to_vvq(_vq); > > - return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, > + return vq->packed_ring ? virtqueue_add_packed(vq, sgs, total_sg, > out_sgs, in_sgs, data, ctx, premapped, gfp) : > - virtqueue_add_split(_vq, sgs, total_sg, > + virtqueue_add_split(vq, sgs, total_sg, > out_sgs, in_sgs, data, ctx, premapped, gfp); > } > > -- > 2.31.1
On Mon, Sep 22, 2025 at 1:47 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Fri, Sep 19, 2025 at 03:31:41PM +0800, Jason Wang wrote: > > Those variants are used internally so let's switch to use > > vring_virtqueue as parameter to be consistent with other internal > > virtqueue helpers. > > > > Acked-by: Eugenio Pérez <eperezma@redhat.com> > > Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > --- > > drivers/virtio/virtio_ring.c | 40 +++++++++++++++++------------------- > > 1 file changed, 19 insertions(+), 21 deletions(-) > > > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c > > index aadeab66e57c..93c36314b5e7 100644 > > --- a/drivers/virtio/virtio_ring.c > > +++ b/drivers/virtio/virtio_ring.c > > @@ -476,7 +476,7 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, > > return extra->next; > > } > > > > -static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, > > +static struct vring_desc *alloc_indirect_split(struct vring_virtqueue *vq, > > unsigned int total_sg, > > gfp_t gfp) > > { > > @@ -505,7 +505,7 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, > > return desc; > > } > > > > -static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > > +static inline unsigned int virtqueue_add_desc_split(struct vring_virtqueue *vq, > > struct vring_desc *desc, > > struct vring_desc_extra *extra, > > unsigned int i, > > @@ -513,11 +513,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > > unsigned int len, > > u16 flags, bool premapped) > > { > > + struct virtio_device *vdev = vq->vq.vdev; > > u16 next; > > > > - desc[i].flags = cpu_to_virtio16(vq->vdev, flags); > > - desc[i].addr = cpu_to_virtio64(vq->vdev, addr); > > - desc[i].len = cpu_to_virtio32(vq->vdev, len); > > + desc[i].flags = cpu_to_virtio16(vdev, flags); > > + desc[i].addr = cpu_to_virtio64(vdev, addr); > > + 
desc[i].len = cpu_to_virtio32(vdev, len); > > > > extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr; > > extra[i].len = len; > > @@ -525,12 +526,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, > > > > next = extra[i].next; > > > > - desc[i].next = cpu_to_virtio16(vq->vdev, next); > > + desc[i].next = cpu_to_virtio16(vdev, next); > > > > return next; > > } > > > > -static inline int virtqueue_add_split(struct virtqueue *_vq, > > +static inline int virtqueue_add_split(struct vring_virtqueue *vq, > > struct scatterlist *sgs[], > > unsigned int total_sg, > > unsigned int out_sgs, > > @@ -540,7 +541,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > bool premapped, > > gfp_t gfp) > > { > > - struct vring_virtqueue *vq = to_vvq(_vq); > > struct vring_desc_extra *extra; > > struct scatterlist *sg; > > struct vring_desc *desc; > > @@ -565,7 +565,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > head = vq->free_head; > > > > if (virtqueue_use_indirect(vq, total_sg)) > > - desc = alloc_indirect_split(_vq, total_sg, gfp); > > + desc = alloc_indirect_split(vq, total_sg, gfp); > > else { > > desc = NULL; > > WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); > > @@ -612,7 +612,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > /* Note that we trust indirect descriptor > > * table since it use stream DMA mapping. > > */ > > - i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, > > + i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len, > > VRING_DESC_F_NEXT, > > premapped); > > } > > @@ -629,14 +629,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > /* Note that we trust indirect descriptor > > * table since it use stream DMA mapping. 
> > */ > > - i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, > > + i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len, > > VRING_DESC_F_NEXT | > > VRING_DESC_F_WRITE, > > premapped); > > } > > } > > /* Last one doesn't continue. */ > > - desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); > > + desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT); > > if (!indirect && vring_need_unmap_buffer(vq, &extra[prev])) > > vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= > > ~VRING_DESC_F_NEXT; > > @@ -649,7 +649,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > if (vring_mapping_error(vq, addr)) > > goto unmap_release; > > > > - virtqueue_add_desc_split(_vq, vq->split.vring.desc, > > + virtqueue_add_desc_split(vq, vq->split.vring.desc, > > vq->split.desc_extra, > > head, addr, > > total_sg * sizeof(struct vring_desc), > > @@ -675,13 +675,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > /* Put entry in available array (but don't update avail->idx until they > > * do sync). */ > > avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); > > - vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); > > + vq->split.vring.avail->ring[avail] = cpu_to_virtio16(vq->vq.vdev, head); > > > > /* Descriptors and available array need to be set before we expose the > > * new available array entries. */ > > virtio_wmb(vq->weak_barriers); > > vq->split.avail_idx_shadow++; > > - vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, > > + vq->split.vring.avail->idx = cpu_to_virtio16(vq->vq.vdev, > > vq->split.avail_idx_shadow); > > vq->num_added++; > > > > @@ -691,7 +691,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > /* This is very unlikely, but theoretically possible. Kick > > * just in case. 
*/ > > if (unlikely(vq->num_added == (1 << 16) - 1)) > > - virtqueue_kick(_vq); > > + virtqueue_kick(&vq->vq); > > > > return 0; > > > > @@ -706,7 +706,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, > > for (n = 0; n < total_sg; n++) { > > if (i == err_idx) > > break; > > - > > i = vring_unmap_one_split(vq, &extra[i]); > > } > > > > can't say I like this, error handling is better separated visually from > good path. I don't see a connection to this comment, since this patch doesn't touch the error handling. We can try to optimize the error handling on top. But I will remove the unnecessary newline change here. Thanks
© 2016 - 2025 Red Hat, Inc.