[PATCH v7 5/5] drm/amdgpu: Make use of drm_wedge_task_info
Posted by André Almeida 3 months, 4 weeks ago
To notify userspace about which task (if any) caused the device to enter a
wedged state, make use of the drm_wedge_task_info parameter, filling it with
the task's PID and name.

Signed-off-by: André Almeida <andrealmeid@igalia.com>
---
v7:
 - Remove struct cast, now we can use `info = &ti->task`
 - Fix struct lifetime, move amdgpu_vm_put_task_info() after
   drm_dev_wedged_event() call
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +++++++++++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  8 ++++++--
 2 files changed, 21 insertions(+), 4 deletions(-)
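
At a glance, both call sites touched below apply the same pattern, condensed
here as a sketch (the amdgpu_job.c path spreads the same steps across the
timeout handler, and the surrounding reset logic is elided):

	struct drm_wedge_task_info *info = NULL;
	struct amdgpu_task_info *ti = NULL;

	/* Resolve the task that owned the offending job, if any. */
	if (job) {
		ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
		if (ti)
			info = &ti->task;
	}

	/* Send the wedged event with (or without) the task info attached. */
	drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);

	/* Drop the task info reference only after the event has been sent. */
	if (ti)
		amdgpu_vm_put_task_info(ti);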

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a0f36f33f13..67cff53678e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -6363,8 +6363,21 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 	atomic_set(&adev->reset_domain->reset_res, r);
 
-	if (!r)
-		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+	if (!r) {
+		struct drm_wedge_task_info *info = NULL;
+		struct amdgpu_task_info *ti = NULL;
+
+		if (job) {
+			ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+			if (ti)
+				info = &ti->task;
+		}
+
+		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);
+
+		if (ti)
+			amdgpu_vm_put_task_info(ti);
+	}
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0c1381b527fe..f061f691f556 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,6 +89,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
+	struct drm_wedge_task_info *info = NULL;
 	struct amdgpu_task_info *ti;
 	struct amdgpu_device *adev = ring->adev;
 	int idx;
@@ -125,7 +126,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 	ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
 	if (ti) {
 		amdgpu_vm_print_task_info(adev, ti);
-		amdgpu_vm_put_task_info(ti);
+		info = &ti->task;
 	}
 
 	/* attempt a per ring reset */
@@ -164,13 +165,16 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 			if (amdgpu_ring_sched_ready(ring))
 				drm_sched_start(&ring->sched, 0);
 			dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
-			drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+			drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);
 			goto exit;
 		}
 		dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
 	}
 	dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
 
+	if (ti)
+		amdgpu_vm_put_task_info(ti);
+
 	if (amdgpu_device_should_recover_gpu(ring->adev)) {
 		struct amdgpu_reset_context reset_context;
 		memset(&reset_context, 0, sizeof(reset_context));
-- 
2.49.0

Re: [PATCH v7 5/5] drm/amdgpu: Make use of drm_wedge_task_info
Posted by Christian König 3 months, 3 weeks ago
On 6/13/25 20:43, André Almeida wrote:
> To notify userspace about which task (if any) caused the device to enter
> a wedged state, make use of the drm_wedge_task_info parameter, filling it
> with the task's PID and name.
> 
> Signed-off-by: André Almeida <andrealmeid@igalia.com>
> ---
> v7:
>  - Remove struct cast, now we can use `info = &ti->task`
>  - Fix struct lifetime, move amdgpu_vm_put_task_info() after
>    drm_dev_wedged_event() call
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 +++++++++++++++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  8 ++++++--
>  2 files changed, 21 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 8a0f36f33f13..67cff53678e1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -6363,8 +6363,21 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>  
>  	atomic_set(&adev->reset_domain->reset_res, r);
>  
> -	if (!r)
> -		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
> +	if (!r) {
> +		struct drm_wedge_task_info *info = NULL;
> +		struct amdgpu_task_info *ti = NULL;
> +
> +		if (job) {
> +			ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
> +			if (ti)
> +				info = &ti->task;

Drop the local variable and write that as ti ? &ti->task : NULL

> +		}
> +
> +		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);

here.

> +
> +		if (ti)
> +			amdgpu_vm_put_task_info(ti);

As a rule of thumb, *put* and *free* functions in the Linux kernel usually accept NULL as a parameter.

It would probably be better if we did that for amdgpu_vm_put_task_info() as well and dropped the extra check.
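
For reference, a minimal sketch of the call site with both suggestions
applied (the ternary at the call and an amdgpu_vm_put_task_info() that
tolerates NULL); this spells out the reviewer's suggestion and is not code
from the series:

	if (!r) {
		struct amdgpu_task_info *ti = NULL;

		if (job)
			ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);

		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
				     ti ? &ti->task : NULL);

		/* Assumes amdgpu_vm_put_task_info() accepts NULL, as suggested above. */
		amdgpu_vm_put_task_info(ti);
	}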

Apart from that looks good to me.

Regards,
Christian.

Re: [PATCH v7 5/5] drm/amdgpu: Make use of drm_wedge_task_info
Posted by Alex Deucher 3 months, 4 weeks ago
On Fri, Jun 13, 2025 at 2:44 PM André Almeida <andrealmeid@igalia.com> wrote:
>
> To notify userspace about which task (if any) caused the device to enter
> a wedged state, make use of the drm_wedge_task_info parameter, filling it
> with the task's PID and name.
>
> Signed-off-by: André Almeida <andrealmeid@igalia.com>

If you want the guilty state to be reliably correct for GC, you'll
need this patch:
https://lists.freedesktop.org/archives/amd-gfx/2025-June/125715.html
GC is pipelined, so the hardware will start working on subsequent jobs
before prior submissions are complete.  This can lead to subsequent
jobs causing a hang which gets attributed to a prior job.  With that
patch in place, the driver will force a fence wait between jobs from
different contexts to ensure they are serialized.
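
Conceptually, that serialization amounts to waiting on the fence of the
previously scheduled job whenever the next job comes from a different
context, so a later hang cannot be blamed on an earlier submission. A rough
sketch of the idea (not the code from the linked patch; the helper here is
made up for illustration):

#include <linux/dma-fence.h>

/*
 * Illustrative only: before emitting a job from a new context, wait for
 * the fence of the last job from a different context so the hardware is
 * idle across the context boundary and blame lands on the right job.
 */
static long serialize_across_contexts(struct dma_fence *prev_fence,
				      u64 prev_ctx, u64 new_ctx)
{
	/* Jobs from the same context may stay pipelined. */
	if (!prev_fence || prev_ctx == new_ctx)
		return 0;

	/* Uninterruptible wait until the earlier context's job completes. */
	return dma_fence_wait(prev_fence, false);
}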

Alex
