Since the HPD IRQ handler directly notifies the DRM core of hotplug events,
there is no need to maintain a separate event waitqueue.

Drop the event waitqueue and all related structs and helpers.

Signed-off-by: Jessica Zhang <jessica.zhang@oss.qualcomm.com>
---
drivers/gpu/drm/msm/dp/dp_display.c | 255 +-----------------------------------
 1 file changed, 7 insertions(+), 248 deletions(-)

diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index b9e2e368c4a8..eabd6e6981fb 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -52,27 +52,6 @@ enum {
ST_DISPLAY_OFF,
};
-enum {
- EV_NO_EVENT,
- /* hpd events */
- EV_HPD_PLUG_INT,
- EV_IRQ_HPD_INT,
- EV_HPD_UNPLUG_INT,
-};
-
-#define EVENT_TIMEOUT (HZ/10) /* 100ms */
-#define DP_EVENT_Q_MAX 8
-
-#define DP_TIMEOUT_NONE 0
-
-#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
-
-struct msm_dp_event {
- u32 event_id;
- u32 data;
- u32 delay;
-};
-
struct msm_dp_display_private {
int irq;
@@ -100,16 +79,7 @@ struct msm_dp_display_private {
spinlock_t irq_thread_lock;
u32 hpd_isr_status;
- /* event related only access by event thread */
- struct mutex event_mutex;
- wait_queue_head_t event_q;
u32 hpd_state;
- u32 event_pndx;
- u32 event_gndx;
- struct task_struct *ev_tsk;
- struct msm_dp_event event_list[DP_EVENT_Q_MAX];
- spinlock_t event_lock;
-
bool wide_bus_supported;
struct msm_dp_audio *audio;
@@ -212,60 +182,6 @@ static struct msm_dp_display_private *dev_get_dp_display_private(struct device *
return container_of(dp, struct msm_dp_display_private, msm_dp_display);
}
-static int msm_dp_add_event(struct msm_dp_display_private *msm_dp_priv, u32 event,
- u32 data, u32 delay)
-{
- unsigned long flag;
- struct msm_dp_event *todo;
- int pndx;
-
- spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
- pndx = msm_dp_priv->event_pndx + 1;
- pndx %= DP_EVENT_Q_MAX;
- if (pndx == msm_dp_priv->event_gndx) {
- pr_err("event_q is full: pndx=%d gndx=%d\n",
- msm_dp_priv->event_pndx, msm_dp_priv->event_gndx);
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
- return -EPERM;
- }
- todo = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++];
- msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX;
- todo->event_id = event;
- todo->data = data;
- todo->delay = delay;
- wake_up(&msm_dp_priv->event_q);
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
-
- return 0;
-}
-
-static int msm_dp_del_event(struct msm_dp_display_private *msm_dp_priv, u32 event)
-{
- unsigned long flag;
- struct msm_dp_event *todo;
- u32 gndx;
-
- spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
- if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) {
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
- return -ENOENT;
- }
-
- gndx = msm_dp_priv->event_gndx;
- while (msm_dp_priv->event_pndx != gndx) {
- todo = &msm_dp_priv->event_list[gndx];
- if (todo->event_id == event) {
- todo->event_id = EV_NO_EVENT; /* deleted */
- todo->delay = 0;
- }
- gndx++;
- gndx %= DP_EVENT_Q_MAX;
- }
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
-
- return 0;
-}
-
void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display)
{
struct msm_dp_display_private *dp;
@@ -284,8 +200,6 @@ void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display)
complete_all(&dp->audio_comp);
}
-static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv);
-
static int msm_dp_display_bind(struct device *dev, struct device *master,
void *data)
{
@@ -305,12 +219,6 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
goto end;
}
- rc = msm_dp_hpd_event_thread_start(dp);
- if (rc) {
- DRM_ERROR("Event thread create failed\n");
- goto end;
- }
-
return 0;
end:
return rc;
@@ -322,8 +230,6 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
- kthread_stop(dp->ev_tsk);
-
of_dp_aux_depopulate_bus(dp->aux);
msm_dp_aux_unregister(dp->aux);
@@ -585,33 +491,22 @@ static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data)
msm_dp_aux_enable_xfers(dp->aux, true);
- mutex_lock(&dp->event_mutex);
-
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->msm_dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF) {
- mutex_unlock(&dp->event_mutex);
+ if (state == ST_DISPLAY_OFF)
return 0;
- }
- if (state == ST_MAINLINK_READY || state == ST_CONNECTED) {
- mutex_unlock(&dp->event_mutex);
+ if (state == ST_MAINLINK_READY || state == ST_CONNECTED)
return 0;
- }
- if (state == ST_DISCONNECT_PENDING) {
- /* wait until ST_DISCONNECTED */
- msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
- mutex_unlock(&dp->event_mutex);
+ if (state == ST_DISCONNECT_PENDING)
return 0;
- }
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret) {
DRM_ERROR("failed to pm_runtime_resume\n");
- mutex_unlock(&dp->event_mutex);
return ret;
}
@@ -625,7 +520,6 @@ static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data)
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->msm_dp_display.connector_type, state);
- mutex_unlock(&dp->event_mutex);
/* uevent will complete connection part */
return 0;
@@ -652,26 +546,19 @@ static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
msm_dp_aux_enable_xfers(dp->aux, false);
- mutex_lock(&dp->event_mutex);
-
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->msm_dp_display.connector_type, state);
- /* unplugged, no more irq_hpd handle */
- msm_dp_del_event(dp, EV_IRQ_HPD_INT);
-
if (state == ST_DISCONNECTED) {
/* triggered by irq_hdp with sink_count = 0 */
if (dp->link->sink_count == 0) {
msm_dp_display_host_phy_exit(dp);
}
msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
- mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_DISCONNECT_PENDING) {
- mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_MAINLINK_READY) {
msm_dp_ctrl_off_link(dp->ctrl);
@@ -679,7 +566,6 @@ static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
dp->hpd_state = ST_DISCONNECTED;
msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
pm_runtime_put_sync(&pdev->dev);
- mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -703,7 +589,6 @@ static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
/* uevent will complete disconnection part */
pm_runtime_put_sync(&pdev->dev);
- mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -711,32 +596,22 @@ static int msm_dp_irq_hpd_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
- mutex_lock(&dp->event_mutex);
-
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->msm_dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF) {
- mutex_unlock(&dp->event_mutex);
+ if (state == ST_DISPLAY_OFF)
return 0;
- }
- if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) {
- /* wait until ST_CONNECTED */
- msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
- mutex_unlock(&dp->event_mutex);
+ if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING)
return 0;
- }
msm_dp_display_usbpd_attention_cb(&dp->msm_dp_display.pdev->dev);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->msm_dp_display.connector_type, state);
- mutex_unlock(&dp->event_mutex);
-
return 0;
}
@@ -1013,12 +888,8 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
* power_on status before dumping DP registers to avoid crash due
* to unclocked access
*/
- mutex_lock(&msm_dp_display->event_mutex);
-
- if (!dp->power_on) {
- mutex_unlock(&msm_dp_display->event_mutex);
+ if (!dp->power_on)
return;
- }
msm_disp_snapshot_add_block(disp_state, msm_dp_display->ahb_len,
msm_dp_display->ahb_base, "dp_ahb");
@@ -1028,8 +899,6 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
msm_dp_display->link_base, "dp_link");
msm_disp_snapshot_add_block(disp_state, msm_dp_display->p0_len,
msm_dp_display->p0_base, "dp_p0");
-
- mutex_unlock(&msm_dp_display->event_mutex);
}
void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter)
@@ -1045,96 +914,6 @@ void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter)
msm_dp_ctrl_set_psr(dp->ctrl, enter);
}
-static int hpd_event_thread(void *data)
-{
- struct msm_dp_display_private *msm_dp_priv;
- unsigned long flag;
- struct msm_dp_event *todo;
- int timeout_mode = 0;
-
- msm_dp_priv = (struct msm_dp_display_private *)data;
-
- while (1) {
- if (timeout_mode) {
- wait_event_timeout(msm_dp_priv->event_q,
- (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) ||
- kthread_should_stop(), EVENT_TIMEOUT);
- } else {
- wait_event_interruptible(msm_dp_priv->event_q,
- (msm_dp_priv->event_pndx != msm_dp_priv->event_gndx) ||
- kthread_should_stop());
- }
-
- if (kthread_should_stop())
- break;
-
- spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
- todo = &msm_dp_priv->event_list[msm_dp_priv->event_gndx];
- if (todo->delay) {
- struct msm_dp_event *todo_next;
-
- msm_dp_priv->event_gndx++;
- msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX;
-
- /* re enter delay event into q */
- todo_next = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++];
- msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX;
- todo_next->event_id = todo->event_id;
- todo_next->data = todo->data;
- todo_next->delay = todo->delay - 1;
-
- /* clean up older event */
- todo->event_id = EV_NO_EVENT;
- todo->delay = 0;
-
- /* switch to timeout mode */
- timeout_mode = 1;
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
- continue;
- }
-
- /* timeout with no events in q */
- if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) {
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
- continue;
- }
-
- msm_dp_priv->event_gndx++;
- msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX;
- timeout_mode = 0;
- spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
-
- switch (todo->event_id) {
- case EV_HPD_PLUG_INT:
- msm_dp_hpd_plug_handle(msm_dp_priv, todo->data);
- break;
- case EV_HPD_UNPLUG_INT:
- msm_dp_hpd_unplug_handle(msm_dp_priv, todo->data);
- break;
- case EV_IRQ_HPD_INT:
- msm_dp_irq_hpd_handle(msm_dp_priv, todo->data);
- break;
- default:
- break;
- }
- }
-
- return 0;
-}
-
-static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv)
-{
- /* set event q to empty */
- msm_dp_priv->event_gndx = 0;
- msm_dp_priv->event_pndx = 0;
-
- msm_dp_priv->ev_tsk = kthread_run(hpd_event_thread, msm_dp_priv, "dp_hpd_handler");
- if (IS_ERR(msm_dp_priv->ev_tsk))
- return PTR_ERR(msm_dp_priv->ev_tsk);
-
- return 0;
-}
-
/**
* msm_dp_bridge_detect - callback to determine if connector is connected
* @bridge: Pointer to drm bridge structure
@@ -1455,11 +1234,6 @@ static int msm_dp_display_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- /* setup event q */
- mutex_init(&dp->event_mutex);
- init_waitqueue_head(&dp->event_q);
- spin_lock_init(&dp->event_lock);
-
/* Store DP audio handle inside DP display */
dp->msm_dp_display.msm_dp_audio = dp->audio;
@@ -1667,23 +1441,18 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
if (dp->is_edp)
msm_dp_hpd_plug_handle(msm_dp_display, 0);
- mutex_lock(&msm_dp_display->event_mutex);
if (pm_runtime_resume_and_get(&dp->pdev->dev)) {
DRM_ERROR("failed to pm_runtime_resume\n");
- mutex_unlock(&msm_dp_display->event_mutex);
return;
}
hpd_state = msm_dp_display->hpd_state;
- if (hpd_state != ST_DISPLAY_OFF && hpd_state != ST_MAINLINK_READY) {
- mutex_unlock(&msm_dp_display->event_mutex);
+ if (hpd_state != ST_DISPLAY_OFF && hpd_state != ST_MAINLINK_READY)
return;
- }
rc = msm_dp_display_set_mode(dp, &msm_dp_display->msm_dp_mode);
if (rc) {
DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
- mutex_unlock(&msm_dp_display->event_mutex);
return;
}
@@ -1712,7 +1481,6 @@ void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
msm_dp_display->hpd_state = ST_CONNECTED;
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
- mutex_unlock(&msm_dp_display->event_mutex);
}
void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
@@ -1740,8 +1508,6 @@ void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
if (dp->is_edp)
msm_dp_hpd_unplug_handle(msm_dp_display, 0);
- mutex_lock(&msm_dp_display->event_mutex);
-
hpd_state = msm_dp_display->hpd_state;
if (hpd_state != ST_DISCONNECT_PENDING && hpd_state != ST_CONNECTED)
drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
@@ -1760,7 +1526,6 @@ void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
pm_runtime_put_sync(&dp->pdev->dev);
- mutex_unlock(&msm_dp_display->event_mutex);
}
void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
@@ -1816,10 +1581,8 @@ void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
* step-4: DP PHY is initialized at plugin handler before link training
*
*/
- mutex_lock(&dp->event_mutex);
if (pm_runtime_resume_and_get(&msm_dp_display->pdev->dev)) {
DRM_ERROR("failed to resume power\n");
- mutex_unlock(&dp->event_mutex);
return;
}
@@ -1827,7 +1590,6 @@ void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
msm_dp_aux_hpd_intr_enable(dp->aux);
msm_dp_display->internal_hpd = true;
- mutex_unlock(&dp->event_mutex);
}
void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
@@ -1836,15 +1598,12 @@ void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- mutex_lock(&dp->event_mutex);
-
msm_dp_aux_hpd_intr_disable(dp->aux);
msm_dp_aux_hpd_disable(dp->aux);
msm_dp_display->internal_hpd = false;
pm_runtime_put_sync(&msm_dp_display->pdev->dev);
- mutex_unlock(&dp->event_mutex);
}
void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge,
--
2.50.1
On Fri, Aug 08, 2025 at 05:35:20PM -0700, Jessica Zhang wrote:
> Since the HPD IRQ handler directly notifies the DRM core of hotplug events,
> there is no need to maintain a separate event waitqueue.
>
> Drop the event waitqueue and all related structs and helpers.
>
> Signed-off-by: Jessica Zhang <jessica.zhang@oss.qualcomm.com>
> ---
[...]
> @@ -585,33 +491,22 @@ static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data)
>
> msm_dp_aux_enable_xfers(dp->aux, true);
>
> - mutex_lock(&dp->event_mutex);

I think the event_mutex should be kept as is. Otherwise we don't have
protection against delivering the next event while we are still processing
the previous one.
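
Something along these lines is what I have in mind. Just a rough sketch, not
a full replacement: the pm_runtime / link bring-up part is elided, and whether
the old "retry while ST_DISCONNECT_PENDING" behaviour needs a substitute is a
separate question. The point is only that the lock stays around the state
handling:

static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data)
{
        u32 state;
        int ret = 0;

        msm_dp_aux_enable_xfers(dp->aux, true);

        /* keep serializing against the unplug / irq_hpd handlers */
        mutex_lock(&dp->event_mutex);

        state = dp->hpd_state;
        if (state == ST_DISPLAY_OFF ||
            state == ST_MAINLINK_READY || state == ST_CONNECTED ||
            state == ST_DISCONNECT_PENDING)
                goto out;

        /* ... pm_runtime_resume_and_get() and link bring-up as before ... */

out:
        mutex_unlock(&dp->event_mutex);
        return ret;
}

The same would apply to the unplug and irq_hpd handlers.
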
> -
> state = dp->hpd_state;
> drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
> dp->msm_dp_display.connector_type, state);
>
--
With best wishes
Dmitry