In amdxdna_gem_obj_vmap(), calling dma_buf_vmap() triggers a kernel
warning when LOCKDEP is enabled. So, for imported objects, use
dma_buf_vmap_unlocked(), and use drm_gem_vmap() for all other objects.
The same change applies to the vunmap code.
Fixes: bd72d4acda10 ("accel/amdxdna: Support user space allocated buffer")
Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
---
drivers/accel/amdxdna/amdxdna_gem.c | 47 ++++++++++++-----------------
1 file changed, 20 insertions(+), 27 deletions(-)
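
For reference, a minimal sketch of the locking contract involved (not part of
the patch; the example_* helper names are made up): dma_buf_vmap() asserts, via
lockdep, that the buffer's reservation lock is held by the caller, whereas
dma_buf_vmap_unlocked() takes and drops that lock internally.

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>

/* Variant A: caller manages the reservation lock around the locked API. */
static int example_vmap_locked_api(struct dma_buf *dbuf, struct iosys_map *map)
{
	int ret;

	ret = dma_resv_lock(dbuf->resv, NULL);
	if (ret)
		return ret;
	ret = dma_buf_vmap(dbuf, map);	/* lockdep sees the lock held */
	dma_resv_unlock(dbuf->resv);
	return ret;
}

/* Variant B: the _unlocked variant handles the reservation lock itself. */
static int example_vmap_unlocked_api(struct dma_buf *dbuf, struct iosys_map *map)
{
	return dma_buf_vmap_unlocked(dbuf, map);
}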
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index d407a36eb412..7f91863c3f24 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -392,35 +392,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
-static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
-
- iosys_map_clear(map);
-
- dma_resv_assert_held(obj->resv);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ int ret;
if (is_import_bo(abo))
- dma_buf_vmap(abo->dma_buf, map);
+ ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
else
- drm_gem_shmem_object_vmap(obj, map);
-
- if (!map->vaddr)
- return -ENOMEM;
+ ret = drm_gem_vmap(to_gobj(abo), &map);
- return 0;
+ *vaddr = map.vaddr;
+ return ret;
}
-static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+ struct iosys_map map;
+
+ if (!abo->mem.kva)
+ return;
- dma_resv_assert_held(obj->resv);
+ iosys_map_set_vaddr(&map, abo->mem.kva);
if (is_import_bo(abo))
- dma_buf_vunmap(abo->dma_buf, map);
+ dma_buf_vunmap_unlocked(abo->dma_buf, &map);
else
- drm_gem_shmem_object_vunmap(obj, map);
+ drm_gem_vunmap(to_gobj(abo), &map);
}
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
@@ -455,7 +453,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
@@ -468,7 +465,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
- drm_gem_vunmap(gobj, &map);
+ amdxdna_gem_obj_vunmap(abo);
mutex_destroy(&abo->lock);
if (is_import_bo(abo)) {
@@ -489,8 +486,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
- .vmap = amdxdna_gem_obj_vmap,
- .vunmap = amdxdna_gem_obj_vunmap,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
.export = amdxdna_gem_prime_export,
@@ -663,7 +660,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo;
int ret;
@@ -692,12 +688,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
@@ -748,7 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo;
int ret;
@@ -770,12 +764,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
return abo;
--
2.34.1
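
To summarize how the reworked helpers are wired after this patch (taken from
the hunks above): the GEM object callbacks fall back to the stock shmem
implementations, and the driver-level wrapper is called directly wherever a
kernel mapping is needed; the NULL check on abo->mem.kva makes the teardown
safe for BOs that were never mapped.

	/* GEM object callbacks use the stock shmem helpers. */
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,

	/* Driver paths map/unmap through the wrapper instead: creation
	 * paths cache the kernel address in abo->mem.kva, and
	 * amdxdna_gem_obj_free() tears it down unconditionally.
	 */
	ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
	...
	amdxdna_gem_obj_vunmap(abo);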
Reviewed-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>

On 9/16/2025 7:48 PM, Lizhi Hou wrote:
> In amdxdna_gem_obj_vmap(), calling dma_buf_vmap() triggers a kernel
> warning when LOCKDEP is enabled. So, for imported objects, use
> dma_buf_vmap_unlocked(), and use drm_gem_vmap() for all other objects.
> The same change applies to the vunmap code.
> [...]

Applied to drm-misc-next

On 9/17/25 07:15, Falkowski, Maciej wrote:
> Reviewed-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
>
> On 9/16/2025 7:48 PM, Lizhi Hou wrote:
>> In amdxdna_gem_obj_vmap(), calling dma_buf_vmap() triggers a kernel
>> warning when LOCKDEP is enabled.
>> [...]