Lacking support for device-specific mappings in virtio, VDUSE must
trick the DMA API in order to make the virtio-vdpa transport work.
It does so by advertising the vDPA device as a DMA device with
VDUSE-specific dma_ops, even though it doesn't do DMA at all.

This patch fixes that: thanks to the new mapping operations supported
by virtio and vDPA, VDUSE can simply switch to advertising its own
mapping operations to virtio via virtio-vdpa, so the DMA API is no
longer needed for VDUSE.
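
Concretely, each mapping callback now receives the opaque map token
registered through vdpa_alloc_device()/map_token instead of a struct
device; since VDUSE keeps using the vDPA device itself as the token,
the callbacks just cast it back (condensed from the diff below):

    static dma_addr_t vduse_dev_map_page(void *token, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir,
                                         unsigned long attrs)
    {
            struct device *dev = token;
            struct vduse_dev *vdev = dev_to_vduse(dev);

            /* Same bounce/iova logic as before, just no dma_map_ops. */
            return vduse_domain_map_page(vdev->domain, page, offset,
                                         size, dir, attrs);
    }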
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vdpa/vdpa_user/iova_domain.c | 2 +-
drivers/vdpa/vdpa_user/iova_domain.h | 2 +-
drivers/vdpa/vdpa_user/vduse_dev.c | 31 ++++++++++++++++------------
3 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 019f3305c0ac..8ea311692545 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -447,7 +447,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
size_t size, dma_addr_t *dma_addr,
- gfp_t flag, unsigned long attrs)
+ gfp_t flag)
{
struct iova_domain *iovad = &domain->consistent_iovad;
unsigned long limit = domain->iova_limit;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 846572b95c23..a2316571671f 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -64,7 +64,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
size_t size, dma_addr_t *dma_addr,
- gfp_t flag, unsigned long attrs);
+ gfp_t flag);
void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
void *vaddr, dma_addr_t dma_addr,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 64bc39722007..f86d7111e103 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -814,51 +814,55 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.free = vduse_vdpa_free,
};
-static void vduse_dev_sync_single_for_device(struct device *dev,
+static void vduse_dev_sync_single_for_device(void *token,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
}
-static void vduse_dev_sync_single_for_cpu(struct device *dev,
+static void vduse_dev_sync_single_for_cpu(void *token,
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
}
-static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+static dma_addr_t vduse_dev_map_page(void *token, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
}
-static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void vduse_dev_unmap_page(void *token, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
}
-static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
+static void *vduse_dev_alloc_coherent(void *token, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
unsigned long iova;
@@ -866,7 +870,7 @@ static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
*dma_addr = DMA_MAPPING_ERROR;
addr = vduse_domain_alloc_coherent(domain, size,
- (dma_addr_t *)&iova, flag, attrs);
+ (dma_addr_t *)&iova, flag);
if (!addr)
return NULL;
@@ -875,25 +879,27 @@ static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
return addr;
}
-static void vduse_dev_free_coherent(struct device *dev, size_t size,
+static void vduse_dev_free_coherent(void *token, size_t size,
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
}
-static size_t vduse_dev_max_mapping_size(struct device *dev)
+static size_t vduse_dev_max_mapping_size(void *token)
{
+ struct device *dev = token;
struct vduse_dev *vdev = dev_to_vduse(dev);
struct vduse_iova_domain *domain = vdev->domain;
return domain->bounce_size;
}
-static const struct dma_map_ops vduse_dev_dma_ops = {
+static const struct virtio_map_ops vduse_map_ops = {
.sync_single_for_device = vduse_dev_sync_single_for_device,
.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
.map_page = vduse_dev_map_page,
@@ -2009,7 +2015,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
return -EEXIST;
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
- &vduse_vdpa_config_ops, NULL,
+ &vduse_vdpa_config_ops, &vduse_map_ops,
1, 1, name, true);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
@@ -2022,7 +2028,6 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
put_device(&vdev->vdpa.dev);
return ret;
}
- set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
vdev->vdpa.map_token = &vdev->vdpa.dev;
vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
--
2.34.1
On Tue, Jul 01, 2025 at 09:14:01AM +0800, Jason Wang wrote:
> Lacking support for device-specific mappings in virtio, VDUSE must
> trick the DMA API in order to make the virtio-vdpa transport work.
> It does so by advertising the vDPA device as a DMA device with
> VDUSE-specific dma_ops, even though it doesn't do DMA at all.
>
> This patch fixes that: thanks to the new mapping operations supported
> by virtio and vDPA, VDUSE can simply switch to advertising its own
> mapping operations to virtio via virtio-vdpa, so the DMA API is no
> longer needed for VDUSE.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>

So what exactly is the issue fixed by all this pile of code? I just
don't really see it. Yes, the existing thing is a hack, but at least it
is isolated within VDUSE, which, let's be frank, is not its only issue.
On Tue, Jul 1, 2025 at 3:50 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Jul 01, 2025 at 09:14:01AM +0800, Jason Wang wrote:
> > Lacking support for device-specific mappings in virtio, VDUSE must
> > trick the DMA API in order to make the virtio-vdpa transport work.
> > It does so by advertising the vDPA device as a DMA device with
> > VDUSE-specific dma_ops, even though it doesn't do DMA at all.
> >
> > This patch fixes that: thanks to the new mapping operations
> > supported by virtio and vDPA, VDUSE can simply switch to advertising
> > its own mapping operations to virtio via virtio-vdpa, so the DMA API
> > is no longer needed for VDUSE.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
>
> So what exactly is the issue fixed by all this pile of code?

Avoiding using the DMA API for VDUSE.

> I just don't really see it. Yes, the existing thing is a hack, but at
> least it is isolated within VDUSE, which, let's be frank, is not its
> only issue.

Christoph raised concerns when Eugenio was trying to extend VDUSE for
multiple address space (AS) support:

https://lists.openwall.net/linux-kernel/2025/06/23/133

I think we need to reach some agreement here. I'm fine with leaving the
current code as is, but then we may have a problem: technically, we
want to allow the control virtqueue to be backed by an isolated iova
domain in order to make shadow virtqueue work. Though this might only
be useful for vhost-vdpa, virtio-vdpa should technically work in this
case as well. With dma_ops this means the cvq would need its own DMA
device, which is tricky for VDUSE to implement (it would need a hack on
top of the existing hack, e.g. creating a child device for this, which
looks like overkill).
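
Since the map token is just an opaque pointer, a per-address-space
token carrying its own iova domain could serve the cvq without any
child device. A rough, untested sketch (vduse_as_token and
vduse_as_map_page are made-up names, not something in this series):

struct vduse_as_token {
	struct vduse_dev *vdev;
	struct vduse_iova_domain *domain; /* e.g. an isolated cvq domain */
};

static dma_addr_t vduse_as_map_page(void *token, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct vduse_as_token *t = token;

	/* Map into this token's domain; no struct device involved. */
	return vduse_domain_map_page(t->domain, page, offset, size,
				     dir, attrs);
}

Thanks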