Avoid leaking a lockdep map on each drm sched creation and destruction
by using a single lockdep map for all drm sched allocated submit_wq.

v2:
- Use alloc_ordered_workqueue_lockdep_map (Tejun)

Cc: Luben Tuikov <ltuikov89@gmail.com>
Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index ab53ab486fe6..cf79d348cb32 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -87,6 +87,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+	.name = "drm_sched_lockdep_map"
+};
+#endif
+
 #define to_drm_sched_job(sched_job) \
 	container_of((sched_job), struct drm_sched_job, queue_node)
 
@@ -1272,7 +1278,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->submit_wq = submit_wq;
 		sched->own_submit_wq = false;
 	} else {
+#ifdef CONFIG_LOCKDEP
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+								       &drm_sched_lockdep_map);
+#else
 		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
 
--
2.34.1
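
For context, the workqueue-side interface used above is the one Tejun added
for exactly this case. A sketch of its expected shape (recalled, not quoted
verbatim from include/linux/workqueue.h):

#ifdef CONFIG_LOCKDEP
/*
 * Like alloc_workqueue()/alloc_ordered_workqueue(), but every workqueue
 * created with the same lockdep_map shares one lockdep class instead of
 * registering a fresh key per workqueue.
 */
struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags,
			    int max_active, struct lockdep_map *lockdep_map, ...);

#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
				    1, lockdep_map, ##args)
#endif

With this, every drm sched that allocates its own submit_wq maps onto the
single drm_sched_lockdep_map class, rather than registering (and on teardown
leaking) a per-workqueue lockdep key.
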
On 10-08-2024 03:58, Matthew Brost wrote:
> [snip]
> +#ifdef CONFIG_LOCKDEP
> +static struct lockdep_map drm_sched_lockdep_map = {
> +	.name = "drm_sched_lockdep_map"
> +};

Would it be better to use STATIC_LOCKDEP_MAP_INIT, initializing the key
here instead of while registering the class?
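
For comparison, the alternative suggested here would pair the map with an
explicit static key, along these lines (a hypothetical sketch, not part of
the patch):

/*
 * Hypothetical alternative using STATIC_LOCKDEP_MAP_INIT(): the class key
 * is a static lock_class_key supplied up front, instead of being taken
 * from the map when the class is registered on first use.
 */
static struct lock_class_key drm_sched_lockdep_key;
static struct lockdep_map drm_sched_lockdep_map =
	STATIC_LOCKDEP_MAP_INIT("drm_sched_lockdep_map", &drm_sched_lockdep_key);
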
On Mon, Aug 12, 2024 at 11:05:31AM +0530, Ghimiray, Himal Prasad wrote:
>
> On 10-08-2024 03:58, Matthew Brost wrote:
> > [snip]
> > +#ifdef CONFIG_LOCKDEP
> > +static struct lockdep_map drm_sched_lockdep_map = {
> > +	.name = "drm_sched_lockdep_map"
> > +};
>
> Would it be better to use STATIC_LOCKDEP_MAP_INIT, initializing the key
> here instead of while registering the class?
>
Most existing design patterns in the kernel define a static lockdep class
this way, so I think this is fine. But honestly, I don't have a strong
opinion here.
Matt
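
As one in-tree example of that convention, mm/memcontrol.c defines its OOM
lock map with just a name (shape recalled, not quoted verbatim):

/* Name-only static lockdep map precedent, as in mm/memcontrol.c. */
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
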
On 14-08-2024 00:46, Matthew Brost wrote:
> On Mon, Aug 12, 2024 at 11:05:31AM +0530, Ghimiray, Himal Prasad wrote:
>> [snip]
>>
>> Would it be better to use STATIC_LOCKDEP_MAP_INIT, initializing the key
>> here instead of while registering the class?
>>
>
> Most existing design patterns in the kernel define a static lockdep class
> this way, so I think this is fine. But honestly, I don't have a strong
> opinion here.
>
> Matt
In that case, I have no concerns with the current initialization.