The global workqueue is only used for vblanks inside KMS code. Move
allocation / flushing / deallocation of it to msm_kms.c
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
---
drivers/gpu/drm/msm/msm_drv.c | 21 ++-------------------
drivers/gpu/drm/msm/msm_kms.c | 16 +++++++++++++++-
2 files changed, 17 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c3588dc9e53764a27efda1901b094724cec8928a..02beb40eb9146941aa43862d07a6d82ae21c965e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -82,13 +82,6 @@ static int msm_drm_uninit(struct device *dev)
drm_atomic_helper_shutdown(ddev);
}
- /* We must cancel and cleanup any pending vblank enable/disable
- * work before msm_irq_uninstall() to avoid work re-enabling an
- * irq after uninstall has disabled it.
- */
-
- flush_workqueue(priv->wq);
-
msm_gem_shrinker_cleanup(ddev);
msm_perf_debugfs_cleanup(priv);
@@ -104,8 +97,6 @@ static int msm_drm_uninit(struct device *dev)
ddev->dev_private = NULL;
drm_dev_put(ddev);
- destroy_workqueue(priv->wq);
-
return 0;
}
@@ -227,12 +218,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- priv->wq = alloc_ordered_workqueue("msm", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto err_put_dev;
- }
-
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -253,12 +238,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
ret = drmm_mode_config_init(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
}
ret = msm_init_vram(ddev);
if (ret)
- goto err_destroy_wq;
+ goto err_put_dev;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -304,8 +289,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
err_deinit_vram:
msm_deinit_vram(ddev);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 35d5397e73b4c5cb90b1770e8570277e782be7ec..821f0b9f968fc3d448e612bfae04639ceb770353 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
BUG_ON(!kms);
+ /* We must cancel and cleanup any pending vblank enable/disable
+ * work before msm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+
+ flush_workqueue(priv->wq);
+
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].worker)
@@ -243,6 +250,8 @@ void msm_drm_kms_uninit(struct device *dev)
if (kms && kms->funcs)
kms->funcs->destroy(kms);
+
+ destroy_workqueue(priv->wq);
}
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
@@ -258,10 +267,14 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
if (ret)
return ret;
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ if (!priv->wq)
+ return -ENOMEM;
+
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- return ret;
+ goto err_msm_uninit;
}
/* Enable normalization of plane zpos */
@@ -319,6 +332,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
return 0;
err_msm_uninit:
+ destroy_workqueue(priv->wq);
return ret;
}
--
2.39.5
On Sun, Apr 13, 2025 at 9:33 AM Dmitry Baryshkov
<dmitry.baryshkov@oss.qualcomm.com> wrote:
>
> The global workqueue is only used for vblanks inside KMS code. Move
> allocation / flushing / deallcation of it to msm_kms.c
Maybe we should also just move the wq into struct msm_kms?
BR,
-R
>
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
> ---
> drivers/gpu/drm/msm/msm_drv.c | 21 ++-------------------
> drivers/gpu/drm/msm/msm_kms.c | 16 +++++++++++++++-
> 2 files changed, 17 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index c3588dc9e53764a27efda1901b094724cec8928a..02beb40eb9146941aa43862d07a6d82ae21c965e 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -82,13 +82,6 @@ static int msm_drm_uninit(struct device *dev)
> drm_atomic_helper_shutdown(ddev);
> }
>
> - /* We must cancel and cleanup any pending vblank enable/disable
> - * work before msm_irq_uninstall() to avoid work re-enabling an
> - * irq after uninstall has disabled it.
> - */
> -
> - flush_workqueue(priv->wq);
> -
> msm_gem_shrinker_cleanup(ddev);
>
> msm_perf_debugfs_cleanup(priv);
> @@ -104,8 +97,6 @@ static int msm_drm_uninit(struct device *dev)
> ddev->dev_private = NULL;
> drm_dev_put(ddev);
>
> - destroy_workqueue(priv->wq);
> -
> return 0;
> }
>
> @@ -227,12 +218,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
> ddev->dev_private = priv;
> priv->dev = ddev;
>
> - priv->wq = alloc_ordered_workqueue("msm", 0);
> - if (!priv->wq) {
> - ret = -ENOMEM;
> - goto err_put_dev;
> - }
> -
> INIT_LIST_HEAD(&priv->objects);
> mutex_init(&priv->obj_lock);
>
> @@ -253,12 +238,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
> if (priv->kms_init) {
> ret = drmm_mode_config_init(ddev);
> if (ret)
> - goto err_destroy_wq;
> + goto err_put_dev;
> }
>
> ret = msm_init_vram(ddev);
> if (ret)
> - goto err_destroy_wq;
> + goto err_put_dev;
>
> dma_set_max_seg_size(dev, UINT_MAX);
>
> @@ -304,8 +289,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
>
> err_deinit_vram:
> msm_deinit_vram(ddev);
> -err_destroy_wq:
> - destroy_workqueue(priv->wq);
> err_put_dev:
> drm_dev_put(ddev);
>
> diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
> index 35d5397e73b4c5cb90b1770e8570277e782be7ec..821f0b9f968fc3d448e612bfae04639ceb770353 100644
> --- a/drivers/gpu/drm/msm/msm_kms.c
> +++ b/drivers/gpu/drm/msm/msm_kms.c
> @@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
>
> BUG_ON(!kms);
>
> + /* We must cancel and cleanup any pending vblank enable/disable
> + * work before msm_irq_uninstall() to avoid work re-enabling an
> + * irq after uninstall has disabled it.
> + */
> +
> + flush_workqueue(priv->wq);
> +
> /* clean up event worker threads */
> for (i = 0; i < priv->num_crtcs; i++) {
> if (priv->event_thread[i].worker)
> @@ -243,6 +250,8 @@ void msm_drm_kms_uninit(struct device *dev)
>
> if (kms && kms->funcs)
> kms->funcs->destroy(kms);
> +
> + destroy_workqueue(priv->wq);
> }
>
> int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
> @@ -258,10 +267,14 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
> if (ret)
> return ret;
>
> + priv->wq = alloc_ordered_workqueue("msm", 0);
> + if (!priv->wq)
> + return -ENOMEM;
> +
> ret = priv->kms_init(ddev);
> if (ret) {
> DRM_DEV_ERROR(dev, "failed to load kms\n");
> - return ret;
> + goto err_msm_uninit;
> }
>
> /* Enable normalization of plane zpos */
> @@ -319,6 +332,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
> return 0;
>
> err_msm_uninit:
> + destroy_workqueue(priv->wq);
> return ret;
> }
>
>
> --
> 2.39.5
>
On 14/04/2025 18:58, Rob Clark wrote:
> On Sun, Apr 13, 2025 at 9:33 AM Dmitry Baryshkov
> <dmitry.baryshkov@oss.qualcomm.com> wrote:
>>
>> The global workqueue is only used for vblanks inside KMS code. Move
>> allocation / flushing / deallcation of it to msm_kms.c
>
> Maybe we should also just move the wq into struct msm_kms?
... together with several other KMS-only fields. I will take a look.
>
> BR,
> -R
>
>>
>> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
>> ---
>> drivers/gpu/drm/msm/msm_drv.c | 21 ++-------------------
>> drivers/gpu/drm/msm/msm_kms.c | 16 +++++++++++++++-
>> 2 files changed, 17 insertions(+), 20 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> index c3588dc9e53764a27efda1901b094724cec8928a..02beb40eb9146941aa43862d07a6d82ae21c965e 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -82,13 +82,6 @@ static int msm_drm_uninit(struct device *dev)
>> drm_atomic_helper_shutdown(ddev);
>> }
>>
>> - /* We must cancel and cleanup any pending vblank enable/disable
>> - * work before msm_irq_uninstall() to avoid work re-enabling an
>> - * irq after uninstall has disabled it.
>> - */
>> -
>> - flush_workqueue(priv->wq);
>> -
>> msm_gem_shrinker_cleanup(ddev);
>>
>> msm_perf_debugfs_cleanup(priv);
>> @@ -104,8 +97,6 @@ static int msm_drm_uninit(struct device *dev)
>> ddev->dev_private = NULL;
>> drm_dev_put(ddev);
>>
>> - destroy_workqueue(priv->wq);
>> -
>> return 0;
>> }
>>
>> @@ -227,12 +218,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
>> ddev->dev_private = priv;
>> priv->dev = ddev;
>>
>> - priv->wq = alloc_ordered_workqueue("msm", 0);
>> - if (!priv->wq) {
>> - ret = -ENOMEM;
>> - goto err_put_dev;
>> - }
>> -
>> INIT_LIST_HEAD(&priv->objects);
>> mutex_init(&priv->obj_lock);
>>
>> @@ -253,12 +238,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
>> if (priv->kms_init) {
>> ret = drmm_mode_config_init(ddev);
>> if (ret)
>> - goto err_destroy_wq;
>> + goto err_put_dev;
>> }
>>
>> ret = msm_init_vram(ddev);
>> if (ret)
>> - goto err_destroy_wq;
>> + goto err_put_dev;
>>
>> dma_set_max_seg_size(dev, UINT_MAX);
>>
>> @@ -304,8 +289,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
>>
>> err_deinit_vram:
>> msm_deinit_vram(ddev);
>> -err_destroy_wq:
>> - destroy_workqueue(priv->wq);
>> err_put_dev:
>> drm_dev_put(ddev);
>>
>> diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
>> index 35d5397e73b4c5cb90b1770e8570277e782be7ec..821f0b9f968fc3d448e612bfae04639ceb770353 100644
>> --- a/drivers/gpu/drm/msm/msm_kms.c
>> +++ b/drivers/gpu/drm/msm/msm_kms.c
>> @@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
>>
>> BUG_ON(!kms);
>>
>> + /* We must cancel and cleanup any pending vblank enable/disable
>> + * work before msm_irq_uninstall() to avoid work re-enabling an
>> + * irq after uninstall has disabled it.
>> + */
>> +
>> + flush_workqueue(priv->wq);
>> +
>> /* clean up event worker threads */
>> for (i = 0; i < priv->num_crtcs; i++) {
>> if (priv->event_thread[i].worker)
>> @@ -243,6 +250,8 @@ void msm_drm_kms_uninit(struct device *dev)
>>
>> if (kms && kms->funcs)
>> kms->funcs->destroy(kms);
>> +
>> + destroy_workqueue(priv->wq);
>> }
>>
>> int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
>> @@ -258,10 +267,14 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
>> if (ret)
>> return ret;
>>
>> + priv->wq = alloc_ordered_workqueue("msm", 0);
>> + if (!priv->wq)
>> + return -ENOMEM;
>> +
>> ret = priv->kms_init(ddev);
>> if (ret) {
>> DRM_DEV_ERROR(dev, "failed to load kms\n");
>> - return ret;
>> + goto err_msm_uninit;
>> }
>>
>> /* Enable normalization of plane zpos */
>> @@ -319,6 +332,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
>> return 0;
>>
>> err_msm_uninit:
>> + destroy_workqueue(priv->wq);
>> return ret;
>> }
>>
>>
>> --
>> 2.39.5
>>
--
With best wishes
Dmitry
On 4/15/2025 1:59 AM, Dmitry Baryshkov wrote:
> On 14/04/2025 18:58, Rob Clark wrote:
>> On Sun, Apr 13, 2025 at 9:33 AM Dmitry Baryshkov
>> <dmitry.baryshkov@oss.qualcomm.com> wrote:
>>>
>>> The global workqueue is only used for vblanks inside KMS code. Move
>>> allocation / flushing / deallcation of it to msm_kms.c
>>
>> Maybe we should also just move the wq into struct msm_kms?
>
> ... together with several other KMS-only fields. I will take a look.
>
Yeah the usages seem to be only within kms, so we can move this to msm_kms.
>>
>> BR,
>> -R
>>
>>>
>>> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
>>> ---
>>> drivers/gpu/drm/msm/msm_drv.c | 21 ++-------------------
>>> drivers/gpu/drm/msm/msm_kms.c | 16 +++++++++++++++-
>>> 2 files changed, 17 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/
>>> msm_drv.c
>>> index
>>> c3588dc9e53764a27efda1901b094724cec8928a..02beb40eb9146941aa43862d07a6d82ae21c965e 100644
>>> --- a/drivers/gpu/drm/msm/msm_drv.c
>>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>>> @@ -82,13 +82,6 @@ static int msm_drm_uninit(struct device *dev)
>>> drm_atomic_helper_shutdown(ddev);
>>> }
>>>
>>> - /* We must cancel and cleanup any pending vblank enable/disable
>>> - * work before msm_irq_uninstall() to avoid work re-enabling an
>>> - * irq after uninstall has disabled it.
>>> - */
>>> -
>>> - flush_workqueue(priv->wq);
>>> -
>>> msm_gem_shrinker_cleanup(ddev);
>>>
>>> msm_perf_debugfs_cleanup(priv);
>>> @@ -104,8 +97,6 @@ static int msm_drm_uninit(struct device *dev)
>>> ddev->dev_private = NULL;
>>> drm_dev_put(ddev);
>>>
>>> - destroy_workqueue(priv->wq);
>>> -
>>> return 0;
>>> }
>>>
>>> @@ -227,12 +218,6 @@ static int msm_drm_init(struct device *dev,
>>> const struct drm_driver *drv)
>>> ddev->dev_private = priv;
>>> priv->dev = ddev;
>>>
>>> - priv->wq = alloc_ordered_workqueue("msm", 0);
>>> - if (!priv->wq) {
>>> - ret = -ENOMEM;
>>> - goto err_put_dev;
>>> - }
>>> -
>>> INIT_LIST_HEAD(&priv->objects);
>>> mutex_init(&priv->obj_lock);
>>>
>>> @@ -253,12 +238,12 @@ static int msm_drm_init(struct device *dev,
>>> const struct drm_driver *drv)
>>> if (priv->kms_init) {
>>> ret = drmm_mode_config_init(ddev);
>>> if (ret)
>>> - goto err_destroy_wq;
>>> + goto err_put_dev;
>>> }
>>>
>>> ret = msm_init_vram(ddev);
>>> if (ret)
>>> - goto err_destroy_wq;
>>> + goto err_put_dev;
>>>
>>> dma_set_max_seg_size(dev, UINT_MAX);
>>>
>>> @@ -304,8 +289,6 @@ static int msm_drm_init(struct device *dev, const
>>> struct drm_driver *drv)
>>>
>>> err_deinit_vram:
>>> msm_deinit_vram(ddev);
>>> -err_destroy_wq:
>>> - destroy_workqueue(priv->wq);
>>> err_put_dev:
>>> drm_dev_put(ddev);
>>>
>>> diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/
>>> msm_kms.c
>>> index
>>> 35d5397e73b4c5cb90b1770e8570277e782be7ec..821f0b9f968fc3d448e612bfae04639ceb770353 100644
>>> --- a/drivers/gpu/drm/msm/msm_kms.c
>>> +++ b/drivers/gpu/drm/msm/msm_kms.c
>>> @@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
>>>
>>> BUG_ON(!kms);
>>>
>>> + /* We must cancel and cleanup any pending vblank enable/disable
>>> + * work before msm_irq_uninstall() to avoid work re-enabling an
>>> + * irq after uninstall has disabled it.
>>> + */
>>> +
>>> + flush_workqueue(priv->wq);
>>> +
>>> /* clean up event worker threads */
>>> for (i = 0; i < priv->num_crtcs; i++) {
>>> if (priv->event_thread[i].worker)
>>> @@ -243,6 +250,8 @@ void msm_drm_kms_uninit(struct device *dev)
>>>
>>> if (kms && kms->funcs)
>>> kms->funcs->destroy(kms);
>>> +
>>> + destroy_workqueue(priv->wq);
>>> }
>>>
>>> int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
>>> @@ -258,10 +267,14 @@ int msm_drm_kms_init(struct device *dev, const
>>> struct drm_driver *drv)
>>> if (ret)
>>> return ret;
>>>
>>> + priv->wq = alloc_ordered_workqueue("msm", 0);
>>> + if (!priv->wq)
>>> + return -ENOMEM;
>>> +
>>> ret = priv->kms_init(ddev);
>>> if (ret) {
>>> DRM_DEV_ERROR(dev, "failed to load kms\n");
>>> - return ret;
>>> + goto err_msm_uninit;
>>> }
>>>
>>> /* Enable normalization of plane zpos */
>>> @@ -319,6 +332,7 @@ int msm_drm_kms_init(struct device *dev, const
>>> struct drm_driver *drv)
>>> return 0;
>>>
>>> err_msm_uninit:
>>> + destroy_workqueue(priv->wq);
>>> return ret;
>>> }
>>>
>>>
>>> --
>>> 2.39.5
>>>
>
>
© 2016 - 2025 Red Hat, Inc.