When using GPUVM in immediate mode, it is necessary to call
drm_gpuva_unlink() from the fence signalling critical path. However,
unlink may call drm_gpuvm_bo_put(), which causes some challenges:
1. drm_gpuvm_bo_put() often requires you to take resv locks, which you
can't do from the fence signalling critical path.
2. drm_gpuvm_bo_put() calls drm_gem_object_put(), which is often going
to be unsafe to call from the fence signalling critical path.
To solve these issues, add a deferred version of drm_gpuva_unlink() that
adds the vm_bo to a deferred cleanup list and cleans it up later.
The new functions take the GEM's GPUVA lock internally rather than
letting the caller do it, because they also need to perform an operation
after releasing the mutex again. This is to prevent freeing the GEM while
holding the mutex (more details in the comments in the patch). This means
that the new functions can only be used with DRM_GPUVM_IMMEDIATE_MODE.
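
For illustration, the intended call pattern looks roughly like the sketch
below. This is not part of the patch: the my_vm structure and the
my_vm_unmap_in_fence_path()/my_vm_cleanup_work() functions are hypothetical
driver code, and the GPUVM is assumed to have been created with
DRM_GPUVM_IMMEDIATE_MODE.

#include <drm/drm_gpuvm.h>
#include <linux/workqueue.h>

/* Hypothetical driver state, for illustration only. */
struct my_vm {
	struct drm_gpuvm gpuvm;		/* created with DRM_GPUVM_IMMEDIATE_MODE */
	struct work_struct cleanup_work;
};

/* Runs in the fence signalling critical path, e.g. from run_job(). */
static void my_vm_unmap_in_fence_path(struct my_vm *vm, struct drm_gpuva *va)
{
	drm_gpuva_remove(va);

	/*
	 * Takes and releases the GEM's GPUVA lock internally and drops the
	 * vm_bo reference; if it was the last reference, the vm_bo is queued
	 * on the VM's bo_defer list instead of being freed here.
	 */
	drm_gpuva_unlink_defer(va);

	/* Hand the actual vm_bo/GEM puts off to process context. */
	queue_work(system_wq, &vm->cleanup_work);
}

/* Process context: safe to take resv locks and to put GEM objects. */
static void my_vm_cleanup_work(struct work_struct *work)
{
	struct my_vm *vm = container_of(work, struct my_vm, cleanup_work);

	drm_gpuvm_bo_deferred_cleanup(&vm->gpuvm);
}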
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
drivers/gpu/drm/drm_gpuvm.c | 184 ++++++++++++++++++++++++++++++++++++++++++++
include/drm/drm_gpuvm.h | 16 ++++
2 files changed, 200 insertions(+)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index a52e95555549a16c062168253477035679d4775b..a530cf0864c5dd837840f31d3e698d4a82c58d3c 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -876,6 +876,27 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
cond_spin_unlock(lock, !!lock);
}
+/**
+ * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
+ * @vm_bo: the &drm_gpuvm_bo
+ *
+ * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
+ * immediately removed from the evict and extobj lists if they are protected by
+ * the resv lock, as we can't take that lock during run_job() in immediate
+ * mode. Therefore, anyone iterating these lists should skip entries that are
+ * being destroyed.
+ *
+ * Checking the refcount without incrementing it is okay as long as the lock
+ * protecting the evict/extobj list is held for as long as you are using the
+ * vm_bo, because even if the refcount hits zero while you are using it, freeing
+ * the vm_bo requires taking the list's lock.
+ */
+static bool
+drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
+{
+ return !kref_read(&vm_bo->kref);
+}
+
/**
* drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
* @__vm_bo: the &drm_gpuvm_bo
@@ -1081,6 +1102,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
INIT_LIST_HEAD(&gpuvm->evict.list);
spin_lock_init(&gpuvm->evict.lock);
+ init_llist_head(&gpuvm->bo_defer);
+
kref_init(&gpuvm->kref);
gpuvm->name = name ? name : "unknown";
@@ -1122,6 +1145,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
"Extobj list should be empty.\n");
drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
"Evict list should be empty.\n");
+ drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
+ "VM BO cleanup list should be empty.\n");
drm_gem_object_put(gpuvm->r_obj);
}
@@ -1217,6 +1242,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
break;
@@ -1460,6 +1488,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
list.entry.evict) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = ops->vm_bo_validate(vm_bo, exec);
if (ret)
break;
@@ -1560,6 +1591,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
INIT_LIST_HEAD(&vm_bo->list.entry.evict);
+ init_llist_node(&vm_bo->list.entry.bo_defer);
return vm_bo;
}
@@ -1621,6 +1653,124 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
+/*
+ * Must be called with GEM mutex held. After releasing GEM mutex,
+ * drm_gpuvm_bo_defer_free_unlocked() must be called.
+ */
+static void
+drm_gpuvm_bo_defer_free_locked(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+
+ if (!drm_gpuvm_resv_protected(gpuvm)) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, true);
+ drm_gpuvm_bo_list_del(vm_bo, evict, true);
+ }
+
+ list_del(&vm_bo->list.entry.gem);
+}
+
+/*
+ * GEM mutex must not be held. Called after drm_gpuvm_bo_defer_free_locked().
+ */
+static void
+drm_gpuvm_bo_defer_free_unlocked(struct drm_gpuvm_bo *vm_bo)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+
+ llist_add(&vm_bo->list.entry.bo_defer, &gpuvm->bo_defer);
+}
+
+static void
+drm_gpuvm_bo_defer_free(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ mutex_lock(&vm_bo->obj->gpuva.lock);
+ drm_gpuvm_bo_defer_free_locked(kref);
+ mutex_unlock(&vm_bo->obj->gpuva.lock);
+
+ /*
+ * It's important that the GEM stays alive for the duration in which we
+ * hold the mutex, but the instant we add the vm_bo to bo_defer,
+ * another thread might call drm_gpuvm_bo_deferred_cleanup() and put
+ * the GEM. Therefore, to avoid kfreeing a mutex we are holding, we add
+ * the vm_bo to bo_defer *after* releasing the GEM's mutex.
+ */
+ drm_gpuvm_bo_defer_free_unlocked(vm_bo);
+}
+
+/**
+ * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
+ * deferred cleanup
+ * @vm_bo: the &drm_gpuvm_bo to release the reference of
+ *
+ * This releases a reference to @vm_bo.
+ *
+ * This might take and release the GEM's GPUVA lock. You should call
+ * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
+ *
+ * Returns: true if vm_bo is being destroyed, false otherwise.
+ */
+bool
+drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
+{
+ if (!vm_bo)
+ return false;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_defer_free);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
+
+/**
+ * drm_gpuvm_bo_deferred_cleanup() - clean up BOs on the deferred
+ * cleanup list
+ * @gpuvm: the VM to clean up
+ *
+ * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list.
+ */
+void
+drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+ struct drm_gem_object *obj;
+ struct llist_node *bo_defer;
+
+ bo_defer = llist_del_all(&gpuvm->bo_defer);
+ if (!bo_defer)
+ return;
+
+ if (drm_gpuvm_resv_protected(gpuvm)) {
+ dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
+ llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, false);
+ drm_gpuvm_bo_list_del(vm_bo, evict, false);
+ }
+ dma_resv_unlock(drm_gpuvm_resv(gpuvm));
+ }
+
+ while (bo_defer) {
+ vm_bo = llist_entry(bo_defer,
+ struct drm_gpuvm_bo, list.entry.bo_defer);
+ bo_defer = bo_defer->next;
+ obj = vm_bo->obj;
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
+
static struct drm_gpuvm_bo *
__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj)
@@ -1948,6 +2098,40 @@ drm_gpuva_unlink(struct drm_gpuva *va)
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
+/**
+ * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
+ * @va: the &drm_gpuva to unlink
+ *
+ * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes
+ * the lock for the caller.
+ */
+void
+drm_gpuva_unlink_defer(struct drm_gpuva *va)
+{
+ struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm_bo *vm_bo = va->vm_bo;
+ bool should_defer_bo;
+
+ if (unlikely(!obj))
+ return;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ mutex_lock(&obj->gpuva.lock);
+ list_del_init(&va->gem.entry);
+
+ /*
+ * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
+ */
+ should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_defer_free_locked);
+ mutex_unlock(&obj->gpuva.lock);
+ if (should_defer_bo)
+ drm_gpuvm_bo_defer_free_unlocked(vm_bo);
+
+ va->vm_bo = NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
+
/**
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
* @gpuvm: the &drm_gpuvm to search in
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 8890ded1d90752a2acbb564f697aa5ab03b5d052..81cc7672cf2d5362c637abfa2a75471e5274ed08 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -27,6 +27,7 @@
#include <linux/dma-resv.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/types.h>
@@ -152,6 +153,7 @@ void drm_gpuva_remove(struct drm_gpuva *va);
void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);
+void drm_gpuva_unlink_defer(struct drm_gpuva *va);
struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
@@ -331,6 +333,11 @@ struct drm_gpuvm {
*/
spinlock_t lock;
} evict;
+
+ /**
+ * @bo_defer: structure holding vm_bos that need to be destroyed
+ */
+ struct llist_head bo_defer;
};
void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
@@ -714,6 +721,12 @@ struct drm_gpuvm_bo {
* &drm_gpuvms evict list.
*/
struct list_head evict;
+
+ /**
+ * @list.entry.bo_defer: List entry to attach to
+ * the &drm_gpuvms bo_defer list.
+ */
+ struct llist_node bo_defer;
} entry;
} list;
};
@@ -746,6 +759,9 @@ drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+bool drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo);
+void drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm);
+
struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
--
2.51.0.618.g983fd99d29-goog
On Wed Oct 1, 2025 at 12:41 PM CEST, Alice Ryhl wrote:
> +/*
> + * Must be called with GEM mutex held. After releasing GEM mutex,
> + * drm_gpuvm_bo_defer_free_unlocked() must be called.
> + */
> +static void
> +drm_gpuvm_bo_defer_free_locked(struct kref *kref)
> +{
[...]
> +/*
> + * GEM mutex must not be held. Called after drm_gpuvm_bo_defer_free_locked().
> + */
> +static void
> +drm_gpuvm_bo_defer_free_unlocked(struct drm_gpuvm_bo *vm_bo)
> +{
[...]
> +static void
> +drm_gpuvm_bo_defer_free(struct kref *kref)
> +{
[...]

So, you're splitting drm_gpuvm_bo_defer_free() into two functions, one doing
the work that is required to be called with the gpuva lock held and one that
does the work that does not require a lock, which makes perfect sense.

However, the naming chosen for the two functions, i.e.
drm_gpuvm_bo_defer_free_unlocked() and drm_gpuvm_bo_defer_free_locked(), is
confusing:

What you semantically mean is "do part 1 with lock held" and "do part 2
without lock held", but the chosen names suggest that both functions are
identical, with the only difference that one takes the lock internally and
the other one requires the caller to take the lock.

It's probably better to name them after what they do and not what they're
part of. If you prefer the latter, that's fine with me too, but please choose
a name that makes this circumstance obvious.

With that addressed,

Acked-by: Danilo Krummrich <dakr@kernel.org>
On Wed, Oct 1, 2025 at 4:01 PM Danilo Krummrich <dakr@kernel.org> wrote:
>
> So, you're splitting drm_gpuvm_bo_defer_free() into two functions, one doing
> the work that is required to be called with the gpuva lock held and one that
> does the work that does not require a lock, which makes perfect sense.
>
> However, the naming chosen for the two functions, i.e.
> drm_gpuvm_bo_defer_free_unlocked() and drm_gpuvm_bo_defer_free_locked(), is
> confusing:
>
[...]
>
> It's probably better to name them after what they do and not what they're
> part of. If you prefer the latter, that's fine with me too, but please
> choose a name that makes this circumstance obvious.

Fair point. Do you have naming suggestions? Otherwise I can name them
drm_gpuvm_bo_defer_free_part1() and drm_gpuvm_bo_defer_free_part2(). :)

Alice
On Wed, 1 Oct 2025 16:42:35 +0200
Alice Ryhl <aliceryhl@google.com> wrote:

> On Wed, Oct 1, 2025 at 4:01 PM Danilo Krummrich <dakr@kernel.org> wrote:
> >
> > It's probably better to name them after what they do and not what they're
> > part of. If you prefer the latter, that's fine with me too, but please
> > choose a name that makes this circumstance obvious.
>
> Fair point. Do you have naming suggestions? Otherwise I can name them
> drm_gpuvm_bo_defer_free_part1() and drm_gpuvm_bo_defer_free_part2(). :)

drm_gpuvm_bo_free_deferral_extract_locked() and
drm_gpuvm_bo_free_deferral_enqueue()?

Definitely not short names though.
On Wed, 01 Oct 2025 10:41:36 +0000
Alice Ryhl <aliceryhl@google.com> wrote:

[...]
> +/*
> + * GEM mutex must not be held. Called after drm_gpuvm_bo_defer_free_locked().
> + */
> +static void
> +drm_gpuvm_bo_defer_free_unlocked(struct drm_gpuvm_bo *vm_bo)
> +{
> +	struct drm_gpuvm *gpuvm = vm_bo->vm;
> +
> +	llist_add(&vm_bo->list.entry.bo_defer, &gpuvm->bo_defer);

Could we simply move this line to drm_gpuvm_bo_defer_free_locked()?
I might be missing something, but I don't really see a reason to
have it exposed as a separate operation.

[...]
> +	while (bo_defer) {
> +		vm_bo = llist_entry(bo_defer,
> +			struct drm_gpuvm_bo, list.entry.bo_defer);

nit: second line of arguments should be aligned on the open parenthesis.

		vm_bo = llist_entry(bo_defer,
				    struct drm_gpuvm_bo, list.entry.bo_defer);

[...]
On Wed, Oct 1, 2025 at 1:27 PM Boris Brezillon
<boris.brezillon@collabora.com> wrote:
>
> > +static void
> > +drm_gpuvm_bo_defer_free_unlocked(struct drm_gpuvm_bo *vm_bo)
> > +{
> > +	struct drm_gpuvm *gpuvm = vm_bo->vm;
> > +
> > +	llist_add(&vm_bo->list.entry.bo_defer, &gpuvm->bo_defer);
>
> Could we simply move this line to drm_gpuvm_bo_defer_free_locked()?
> I might be missing something, but I don't really see a reason to
> have it exposed as a separate operation.

No, if drm_gpuvm_bo_deferred_cleanup() is called in parallel (e.g.
from a workqueue as we discussed), then this can lead to kfreeing the
GEM while we hold the mutex. We must not add the vm_bo until it's safe
to kfree the GEM. See the comment on
drm_gpuvm_bo_defer_free_unlocked() below.

> > +	/*
> > +	 * It's important that the GEM stays alive for the duration in which we
> > +	 * hold the mutex, but the instant we add the vm_bo to bo_defer,
> > +	 * another thread might call drm_gpuvm_bo_deferred_cleanup() and put
> > +	 * the GEM. Therefore, to avoid kfreeing a mutex we are holding, we add
> > +	 * the vm_bo to bo_defer *after* releasing the GEM's mutex.
> > +	 */
[...]

Alice
On Wed, 1 Oct 2025 13:45:36 +0200
Alice Ryhl <aliceryhl@google.com> wrote:

> No, if drm_gpuvm_bo_deferred_cleanup() is called in parallel (e.g.
> from a workqueue as we discussed), then this can lead to kfreeing the
> GEM while we hold the mutex. We must not add the vm_bo until it's safe
> to kfree the GEM. See the comment on
> drm_gpuvm_bo_defer_free_unlocked() below.

Uh, right, I forgot that the lock was embedded in the BO, which we're
releasing a ref on in the cleanup path.
On Wed, 1 Oct 2025 14:04:18 +0200
Boris Brezillon <boris.brezillon@collabora.com> wrote:

> Uh, right, I forgot that the lock was embedded in the BO, which we're
> releasing a ref on in the cleanup path.

Would be good to document the race in the comment saying that
gpuva.lock shouldn't be held though.
On Wed, Oct 1, 2025 at 2:13 PM Boris Brezillon
<boris.brezillon@collabora.com> wrote:
>
> > Uh, right, I forgot that the lock was embedded in the BO, which we're
> > releasing a ref on in the cleanup path.
>
> Would be good to document the race in the comment saying that
> gpuva.lock shouldn't be held though.

I can move the comment in drm_gpuvm_bo_defer_free() to the function comment.

Alice
On Wed, 1 Oct 2025 14:22:06 +0200
Alice Ryhl <aliceryhl@google.com> wrote:

> > Would be good to document the race in the comment saying that
> > gpuva.lock shouldn't be held though.
>
> I can move the comment in drm_gpuvm_bo_defer_free() to the function comment.

That, or you refer to the function where it's documented in the comment.
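
To make the race discussed in this thread concrete, the problematic
interleaving can be sketched as follows. This is illustrative only and not
part of the patch; it assumes the deferred cleanup runs from a driver
workqueue, as mentioned above.

/*
 * Why the llist_add() in drm_gpuvm_bo_defer_free() must happen only after
 * obj->gpuva.lock has been released (two-thread timeline, illustrative):
 *
 *   unlink path (fence context)           cleanup worker (process context)
 *   ---------------------------           --------------------------------
 *   mutex_lock(&obj->gpuva.lock);
 *   llist_add(vm_bo, &gpuvm->bo_defer);   // published too early
 *                                         drm_gpuvm_bo_deferred_cleanup()
 *                                           ops->vm_bo_free(vm_bo);
 *                                           drm_gem_object_put(obj);
 *                                             // last reference: obj is
 *                                             // freed, and with it
 *                                             // obj->gpuva.lock
 *   mutex_unlock(&obj->gpuva.lock);       // use-after-free
 *
 * Publishing the vm_bo on bo_defer only after mutex_unlock() closes the
 * window: the worker cannot observe the vm_bo, and therefore cannot drop
 * the GEM reference, while the lock is still held.
 */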