The GPU Scheduler now supports a new callback, cancel_job(), which lets
the scheduler cancel all jobs which might not yet be freed when
drm_sched_fini() runs. Using this callback allows for significantly
simplifying the mock scheduler teardown code.
Implement the cancel_job() callback and adjust the code where necessary.
Signed-off-by: Philipp Stanner <phasta@kernel.org>
---
.../gpu/drm/scheduler/tests/mock_scheduler.c | 68 +++++++------------
drivers/gpu/drm/scheduler/tests/sched_tests.h | 1 -
2 files changed, 25 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index 49d067fecd67..0d1d57213e05 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -63,7 +63,7 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
 	lockdep_assert_held(&sched->lock);
 
 	job->flags |= DRM_MOCK_SCHED_JOB_DONE;
-	list_move_tail(&job->link, &sched->done_list);
+	list_del(&job->link);
 	dma_fence_signal_locked(&job->hw_fence);
 	complete(&job->done);
 }
@@ -236,26 +236,41 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
 
 static void mock_sched_free_job(struct drm_sched_job *sched_job)
 {
-	struct drm_mock_scheduler *sched =
-			drm_sched_to_mock_sched(sched_job->sched);
 	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
-	unsigned long flags;
 
-	/* Remove from the scheduler done list. */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_del(&job->link);
-	spin_unlock_irqrestore(&sched->lock, flags);
 	dma_fence_put(&job->hw_fence);
-
 	drm_sched_job_cleanup(sched_job);
 
 	/* Mock job itself is freed by the kunit framework. */
 }
 
+static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+	struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+	unsigned long flags;
+
+	hrtimer_cancel(&job->timer);
+
+	spin_lock_irqsave(&sched->lock, flags);
+	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+		list_del(&job->link);
+		dma_fence_set_error(&job->hw_fence, -ECANCELED);
+		dma_fence_signal_locked(&job->hw_fence);
+	}
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	/*
+	 * The GPU Scheduler will call drm_sched_backend_ops.free_job(), still.
+	 * Mock job itself is freed by the kunit framework.
+	 */
+}
+
 static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
 	.run_job = mock_sched_run_job,
 	.timedout_job = mock_sched_timedout_job,
-	.free_job = mock_sched_free_job
+	.free_job = mock_sched_free_job,
+	.cancel_job = mock_sched_cancel_job,
 };
 
 /**
@@ -289,7 +304,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
 	sched->hw_timeline.context = dma_fence_context_alloc(1);
 	atomic_set(&sched->hw_timeline.next_seqno, 0);
 	INIT_LIST_HEAD(&sched->job_list);
-	INIT_LIST_HEAD(&sched->done_list);
 	spin_lock_init(&sched->lock);
 
 	return sched;
@@ -304,38 +318,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
  */
 void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
 {
-	struct drm_mock_sched_job *job, *next;
-	unsigned long flags;
-	LIST_HEAD(list);
-
-	drm_sched_wqueue_stop(&sched->base);
-
-	/* Force complete all unfinished jobs. */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &sched->job_list, link)
-		list_move_tail(&job->link, &list);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	list_for_each_entry(job, &list, link)
-		hrtimer_cancel(&job->timer);
-
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &list, link)
-		drm_mock_sched_job_complete(job);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	/*
-	 * Free completed jobs and jobs not yet processed by the DRM scheduler
-	 * free worker.
-	 */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &sched->done_list, link)
-		list_move_tail(&job->link, &list);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	list_for_each_entry_safe(job, next, &list, link)
-		mock_sched_free_job(&job->base);
-
 	drm_sched_fini(&sched->base);
 }
 
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index fbba38137f0c..0eddfb8d89e6 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -49,7 +49,6 @@ struct drm_mock_scheduler {
 
 	spinlock_t lock;
 	struct list_head job_list;
-	struct list_head done_list;
 
 	struct {
 		u64 context;
--
2.49.0
On 09/07/2025 12:52, Philipp Stanner wrote:
> The GPU Scheduler now supports a new callback, cancel_job(), which lets
> the scheduler cancel all jobs which might not yet be freed when
> drm_sched_fini() runs. Using this callback allows for significantly
> simplifying the mock scheduler teardown code.
>
> Implement the cancel_job() callback and adjust the code where necessary.
>
> Signed-off-by: Philipp Stanner <phasta@kernel.org>
> ---
>  .../gpu/drm/scheduler/tests/mock_scheduler.c | 68 +++++++------------
>  drivers/gpu/drm/scheduler/tests/sched_tests.h | 1 -
>  2 files changed, 25 insertions(+), 44 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
> index 49d067fecd67..0d1d57213e05 100644
> --- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
> @@ -63,7 +63,7 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
>  	lockdep_assert_held(&sched->lock);
>
>  	job->flags |= DRM_MOCK_SCHED_JOB_DONE;
> -	list_move_tail(&job->link, &sched->done_list);
> +	list_del(&job->link);
>  	dma_fence_signal_locked(&job->hw_fence);
>  	complete(&job->done);
>  }
> @@ -236,26 +236,41 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
>
>  static void mock_sched_free_job(struct drm_sched_job *sched_job)
>  {
> -	struct drm_mock_scheduler *sched =
> -			drm_sched_to_mock_sched(sched_job->sched);
>  	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
> -	unsigned long flags;
>
> -	/* Remove from the scheduler done list. */
> -	spin_lock_irqsave(&sched->lock, flags);
> -	list_del(&job->link);
> -	spin_unlock_irqrestore(&sched->lock, flags);
>  	dma_fence_put(&job->hw_fence);
> -
>  	drm_sched_job_cleanup(sched_job);
>
>  	/* Mock job itself is freed by the kunit framework. */
>  }
>
> +static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
> +{
> +	struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
> +	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
> +	unsigned long flags;
> +
> +	hrtimer_cancel(&job->timer);
> +
> +	spin_lock_irqsave(&sched->lock, flags);
> +	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
> +		list_del(&job->link);
> +		dma_fence_set_error(&job->hw_fence, -ECANCELED);
> +		dma_fence_signal_locked(&job->hw_fence);
> +	}
> +	spin_unlock_irqrestore(&sched->lock, flags);
> +
> +	/*
> +	 * The GPU Scheduler will call drm_sched_backend_ops.free_job(), still.
> +	 * Mock job itself is freed by the kunit framework.
> +	 */
> +}
> +
>  static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
>  	.run_job = mock_sched_run_job,
>  	.timedout_job = mock_sched_timedout_job,
> -	.free_job = mock_sched_free_job
> +	.free_job = mock_sched_free_job,
> +	.cancel_job = mock_sched_cancel_job,
>  };
>
>  /**
> @@ -289,7 +304,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
>  	sched->hw_timeline.context = dma_fence_context_alloc(1);
>  	atomic_set(&sched->hw_timeline.next_seqno, 0);
>  	INIT_LIST_HEAD(&sched->job_list);
> -	INIT_LIST_HEAD(&sched->done_list);
>  	spin_lock_init(&sched->lock);
>
>  	return sched;
> @@ -304,38 +318,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
>   */
>  void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
>  {
> -	struct drm_mock_sched_job *job, *next;
> -	unsigned long flags;
> -	LIST_HEAD(list);
> -
> -	drm_sched_wqueue_stop(&sched->base);
> -
> -	/* Force complete all unfinished jobs. */
> -	spin_lock_irqsave(&sched->lock, flags);
> -	list_for_each_entry_safe(job, next, &sched->job_list, link)
> -		list_move_tail(&job->link, &list);
> -	spin_unlock_irqrestore(&sched->lock, flags);
> -
> -	list_for_each_entry(job, &list, link)
> -		hrtimer_cancel(&job->timer);
> -
> -	spin_lock_irqsave(&sched->lock, flags);
> -	list_for_each_entry_safe(job, next, &list, link)
> -		drm_mock_sched_job_complete(job);
> -	spin_unlock_irqrestore(&sched->lock, flags);
> -
> -	/*
> -	 * Free completed jobs and jobs not yet processed by the DRM scheduler
> -	 * free worker.
> -	 */
> -	spin_lock_irqsave(&sched->lock, flags);
> -	list_for_each_entry_safe(job, next, &sched->done_list, link)
> -		list_move_tail(&job->link, &list);
> -	spin_unlock_irqrestore(&sched->lock, flags);
> -
> -	list_for_each_entry_safe(job, next, &list, link)
> -		mock_sched_free_job(&job->base);
> -
>  	drm_sched_fini(&sched->base);
>  }
>
> diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
> index fbba38137f0c..0eddfb8d89e6 100644
> --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
> +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
> @@ -49,7 +49,6 @@ struct drm_mock_scheduler {
>
>  	spinlock_t lock;
>  	struct list_head job_list;
> -	struct list_head done_list;
>
>  	struct {
>  		u64 context;

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>

Regards,

Tvrtko
© 2016 - 2025 Red Hat, Inc.