drivers/gpu/drm/panthor/panthor_mmu.c | 4 ++++ drivers/gpu/drm/panthor/panthor_sched.c | 14 ++++++++++++++ drivers/gpu/drm/panthor/panthor_sched.h | 1 + 3 files changed, 19 insertions(+)
Ensure all related groups are evicted and suspended before VM
destruction takes place.
This fixes an issue where panthor_vm_destroy() destroys and unmaps the
heap context while there are still on-slot groups using it.
The FW will do a write out to the heap context when a CSG (group) is
suspended, so a premature unmap of the heap context will cause a
GPU page fault.
This page fault is quite harmless, and does not affect the continued
operation of the GPU.
Fixes: 647810ec2476 ("drm/panthor: Add the MMU/VM logical block")
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
---
Changes in v2:
- Removed check for ptdev->scheduler
- R-b from Boris
- Link to v1: https://lore.kernel.org/all/20251218162644.828495-1-ketil.johnsen@arm.com/
---
drivers/gpu/drm/panthor/panthor_mmu.c | 4 ++++
drivers/gpu/drm/panthor/panthor_sched.c | 14 ++++++++++++++
drivers/gpu/drm/panthor/panthor_sched.h | 1 +
3 files changed, 19 insertions(+)
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 74230f7199121..0e4b301a9c70e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1537,6 +1537,10 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
vm->destroyed = true;
+ /* Tell scheduler to stop all GPU work related to this VM */
+ if (refcount_read(&vm->as.active_cnt) > 0)
+ panthor_sched_prepare_for_vm_destruction(vm->ptdev);
+
mutex_lock(&vm->heaps.lock);
panthor_heap_pool_destroy(vm->heaps.pool);
vm->heaps.pool = NULL;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index f680edcd40aad..a40ac94e5e989 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2930,6 +2930,20 @@ void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
sched_queue_delayed_work(ptdev->scheduler, tick, 0);
}
+void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
+{
+ /* FW can write out internal state, like the heap context, during CSG
+ * suspend. It is therefore important that the scheduler has fully
+ * evicted any pending and related groups before VM destruction can
+ * safely continue. Failure to do so can lead to GPU page faults.
+ * A controlled termination of a Panthor instance involves destroying
+ * the group(s) before the VM. This means any relevant group eviction
+ * has already been initiated by this point, and we just need to
+ * ensure that any pending tick_work() has been completed.
+ */
+ flush_work(&ptdev->scheduler->tick_work.work);
+}
+
void panthor_sched_resume(struct panthor_device *ptdev)
{
/* Force a tick to re-evaluate after a resume. */
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index f4a475aa34c0a..9a8692de8aded 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -50,6 +50,7 @@ void panthor_sched_suspend(struct panthor_device *ptdev);
void panthor_sched_resume(struct panthor_device *ptdev);
void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
+void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev);
void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
--
2.43.0
On 19/12/2025 09:35, Ketil Johnsen wrote:
> Ensure all related groups are evicted and suspended before VM
> destruction takes place.
>
> This fixes an issue where panthor_vm_destroy() destroys and unmaps the
> heap context while there are still on slot groups using this.
> The FW will do a write out to the heap context when a CSG (group) is
> suspended, so a premature unmap of the heap context will cause a
> GPU page fault.
> This page fault is quite harmless, and do not affect the continued
> operation of the GPU.
>
> Fixes: 647810ec2476 ("drm/panthor: Add the MMU/VM logical block")
> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com>
> Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
We're technically missing Boris's SoB here (Co-developed-by requires a
SoB). Boris, do you want to merge this (adding your SoB)?
Thanks,
Steve
> ---
> Changes in v2:
> - Removed check for ptdev->scheduler
> - R-b from Boris
> - Link to v1: https://lore.kernel.org/all/20251218162644.828495-1-ketil.johnsen@arm.com/
> ---
> drivers/gpu/drm/panthor/panthor_mmu.c | 4 ++++
> drivers/gpu/drm/panthor/panthor_sched.c | 14 ++++++++++++++
> drivers/gpu/drm/panthor/panthor_sched.h | 1 +
> 3 files changed, 19 insertions(+)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 74230f7199121..0e4b301a9c70e 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -1537,6 +1537,10 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
>
> vm->destroyed = true;
>
> + /* Tell scheduler to stop all GPU work related to this VM */
> + if (refcount_read(&vm->as.active_cnt) > 0)
> + panthor_sched_prepare_for_vm_destruction(vm->ptdev);
> +
> mutex_lock(&vm->heaps.lock);
> panthor_heap_pool_destroy(vm->heaps.pool);
> vm->heaps.pool = NULL;
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index f680edcd40aad..a40ac94e5e989 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2930,6 +2930,20 @@ void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
> sched_queue_delayed_work(ptdev->scheduler, tick, 0);
> }
>
> +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
> +{
> + /* FW can write out internal state, like the heap context, during CSG
> + * suspend. It is therefore important that the scheduler has fully
> + * evicted any pending and related groups before VM destruction can
> + * safely continue. Failure to do so can lead to GPU page faults.
> + * A controlled termination of a Panthor instance involves destroying
> + * the group(s) before the VM. This means any relevant group eviction
> + * has already been initiated by this point, and we just need to
> + * ensure that any pending tick_work() has been completed.
> + */
> + flush_work(&ptdev->scheduler->tick_work.work);
> +}
> +
> void panthor_sched_resume(struct panthor_device *ptdev)
> {
> /* Force a tick to re-evaluate after a resume. */
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
> index f4a475aa34c0a..9a8692de8aded 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.h
> +++ b/drivers/gpu/drm/panthor/panthor_sched.h
> @@ -50,6 +50,7 @@ void panthor_sched_suspend(struct panthor_device *ptdev);
> void panthor_sched_resume(struct panthor_device *ptdev);
>
> void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
> +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev);
> void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
>
> void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
On Fri, 19 Dec 2025 10:45:48 +0000
Steven Price <steven.price@arm.com> wrote:
> On 19/12/2025 09:35, Ketil Johnsen wrote:
> > Ensure all related groups are evicted and suspended before VM
> > destruction takes place.
> >
> > This fixes an issue where panthor_vm_destroy() destroys and unmaps the
> > heap context while there are still on slot groups using this.
> > The FW will do a write out to the heap context when a CSG (group) is
> > suspended, so a premature unmap of the heap context will cause a
> > GPU page fault.
> > This page fault is quite harmless, and do not affect the continued
> > operation of the GPU.
> >
> > Fixes: 647810ec2476 ("drm/panthor: Add the MMU/VM logical block")
> > Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> > Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com>
> > Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
>
> We're technically missing Boris's SoB here (Co-deleveloped-by requires a
> SoB). Boris do you want to merge this (adding your SoB)?
Done and queued to drm-misc-next, since I don't think the fix is super
urgent, and I'd rather avoid conflicts between misc-next and
misc-fixes when I can avoid them.
>
> Thanks,
> Steve
>
> > ---
> > Changes in v2:
> > - Removed check for ptdev->scheduler
> > - R-b from Boris
> > - Link to v1: https://lore.kernel.org/all/20251218162644.828495-1-ketil.johnsen@arm.com/
> > ---
> > drivers/gpu/drm/panthor/panthor_mmu.c | 4 ++++
> > drivers/gpu/drm/panthor/panthor_sched.c | 14 ++++++++++++++
> > drivers/gpu/drm/panthor/panthor_sched.h | 1 +
> > 3 files changed, 19 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> > index 74230f7199121..0e4b301a9c70e 100644
> > --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> > +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> > @@ -1537,6 +1537,10 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
> >
> > vm->destroyed = true;
> >
> > + /* Tell scheduler to stop all GPU work related to this VM */
> > + if (refcount_read(&vm->as.active_cnt) > 0)
> > + panthor_sched_prepare_for_vm_destruction(vm->ptdev);
> > +
> > mutex_lock(&vm->heaps.lock);
> > panthor_heap_pool_destroy(vm->heaps.pool);
> > vm->heaps.pool = NULL;
> > diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> > index f680edcd40aad..a40ac94e5e989 100644
> > --- a/drivers/gpu/drm/panthor/panthor_sched.c
> > +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> > @@ -2930,6 +2930,20 @@ void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
> > sched_queue_delayed_work(ptdev->scheduler, tick, 0);
> > }
> >
> > +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
> > +{
> > + /* FW can write out internal state, like the heap context, during CSG
> > + * suspend. It is therefore important that the scheduler has fully
> > + * evicted any pending and related groups before VM destruction can
> > + * safely continue. Failure to do so can lead to GPU page faults.
> > + * A controlled termination of a Panthor instance involves destroying
> > + * the group(s) before the VM. This means any relevant group eviction
> > + * has already been initiated by this point, and we just need to
> > + * ensure that any pending tick_work() has been completed.
> > + */
> > + flush_work(&ptdev->scheduler->tick_work.work);
> > +}
> > +
> > void panthor_sched_resume(struct panthor_device *ptdev)
> > {
> > /* Force a tick to re-evaluate after a resume. */
> > diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
> > index f4a475aa34c0a..9a8692de8aded 100644
> > --- a/drivers/gpu/drm/panthor/panthor_sched.h
> > +++ b/drivers/gpu/drm/panthor/panthor_sched.h
> > @@ -50,6 +50,7 @@ void panthor_sched_suspend(struct panthor_device *ptdev);
> > void panthor_sched_resume(struct panthor_device *ptdev);
> >
> > void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
> > +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev);
> > void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
> >
> > void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
>
On Fri, Dec 19, 2025 at 10:35:44AM +0100, Ketil Johnsen wrote:
> Ensure all related groups are evicted and suspended before VM
> destruction takes place.
>
> This fixes an issue where panthor_vm_destroy() destroys and unmaps the
> heap context while there are still on slot groups using this.
> The FW will do a write out to the heap context when a CSG (group) is
> suspended, so a premature unmap of the heap context will cause a
> GPU page fault.
> This page fault is quite harmless, and do not affect the continued
> operation of the GPU.
>
> Fixes: 647810ec2476 ("drm/panthor: Add the MMU/VM logical block")
> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com>
> Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Best regards,
Liviu
> ---
> Changes in v2:
> - Removed check for ptdev->scheduler
> - R-b from Boris
> - Link to v1: https://lore.kernel.org/all/20251218162644.828495-1-ketil.johnsen@arm.com/
> ---
> drivers/gpu/drm/panthor/panthor_mmu.c | 4 ++++
> drivers/gpu/drm/panthor/panthor_sched.c | 14 ++++++++++++++
> drivers/gpu/drm/panthor/panthor_sched.h | 1 +
> 3 files changed, 19 insertions(+)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 74230f7199121..0e4b301a9c70e 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -1537,6 +1537,10 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
>
> vm->destroyed = true;
>
> + /* Tell scheduler to stop all GPU work related to this VM */
> + if (refcount_read(&vm->as.active_cnt) > 0)
> + panthor_sched_prepare_for_vm_destruction(vm->ptdev);
> +
> mutex_lock(&vm->heaps.lock);
> panthor_heap_pool_destroy(vm->heaps.pool);
> vm->heaps.pool = NULL;
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index f680edcd40aad..a40ac94e5e989 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2930,6 +2930,20 @@ void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
> sched_queue_delayed_work(ptdev->scheduler, tick, 0);
> }
>
> +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
> +{
> + /* FW can write out internal state, like the heap context, during CSG
> + * suspend. It is therefore important that the scheduler has fully
> + * evicted any pending and related groups before VM destruction can
> + * safely continue. Failure to do so can lead to GPU page faults.
> + * A controlled termination of a Panthor instance involves destroying
> + * the group(s) before the VM. This means any relevant group eviction
> + * has already been initiated by this point, and we just need to
> + * ensure that any pending tick_work() has been completed.
> + */
> + flush_work(&ptdev->scheduler->tick_work.work);
> +}
> +
> void panthor_sched_resume(struct panthor_device *ptdev)
> {
> /* Force a tick to re-evaluate after a resume. */
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
> index f4a475aa34c0a..9a8692de8aded 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.h
> +++ b/drivers/gpu/drm/panthor/panthor_sched.h
> @@ -50,6 +50,7 @@ void panthor_sched_suspend(struct panthor_device *ptdev);
> void panthor_sched_resume(struct panthor_device *ptdev);
>
> void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
> +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev);
> void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
>
> void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
> --
> 2.43.0
>
On 19/12/2025 09:35, Ketil Johnsen wrote:
> Ensure all related groups are evicted and suspended before VM
> destruction takes place.
>
> This fixes an issue where panthor_vm_destroy() destroys and unmaps the
> heap context while there are still on slot groups using this.
> The FW will do a write out to the heap context when a CSG (group) is
> suspended, so a premature unmap of the heap context will cause a
> GPU page fault.
> This page fault is quite harmless, and do not affect the continued
> operation of the GPU.
>
> Fixes: 647810ec2476 ("drm/panthor: Add the MMU/VM logical block")
> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
> Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com>
> Signed-off-by: Ketil Johnsen <ketil.johnsen@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
> ---
> Changes in v2:
> - Removed check for ptdev->scheduler
> - R-b from Boris
> - Link to v1: https://lore.kernel.org/all/20251218162644.828495-1-ketil.johnsen@arm.com/
> ---
> drivers/gpu/drm/panthor/panthor_mmu.c | 4 ++++
> drivers/gpu/drm/panthor/panthor_sched.c | 14 ++++++++++++++
> drivers/gpu/drm/panthor/panthor_sched.h | 1 +
> 3 files changed, 19 insertions(+)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 74230f7199121..0e4b301a9c70e 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -1537,6 +1537,10 @@ static void panthor_vm_destroy(struct panthor_vm *vm)
>
> vm->destroyed = true;
>
> + /* Tell scheduler to stop all GPU work related to this VM */
> + if (refcount_read(&vm->as.active_cnt) > 0)
> + panthor_sched_prepare_for_vm_destruction(vm->ptdev);
> +
> mutex_lock(&vm->heaps.lock);
> panthor_heap_pool_destroy(vm->heaps.pool);
> vm->heaps.pool = NULL;
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index f680edcd40aad..a40ac94e5e989 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -2930,6 +2930,20 @@ void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
> sched_queue_delayed_work(ptdev->scheduler, tick, 0);
> }
>
> +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
> +{
> + /* FW can write out internal state, like the heap context, during CSG
> + * suspend. It is therefore important that the scheduler has fully
> + * evicted any pending and related groups before VM destruction can
> + * safely continue. Failure to do so can lead to GPU page faults.
> + * A controlled termination of a Panthor instance involves destroying
> + * the group(s) before the VM. This means any relevant group eviction
> + * has already been initiated by this point, and we just need to
> + * ensure that any pending tick_work() has been completed.
> + */
> + flush_work(&ptdev->scheduler->tick_work.work);
> +}
> +
> void panthor_sched_resume(struct panthor_device *ptdev)
> {
> /* Force a tick to re-evaluate after a resume. */
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
> index f4a475aa34c0a..9a8692de8aded 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.h
> +++ b/drivers/gpu/drm/panthor/panthor_sched.h
> @@ -50,6 +50,7 @@ void panthor_sched_suspend(struct panthor_device *ptdev);
> void panthor_sched_resume(struct panthor_device *ptdev);
>
> void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
> +void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev);
> void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
>
> void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
© 2016 - 2026 Red Hat, Inc.