This adds init/fini functions that hold all the init and
teardown code for amdgpu_ttm_buffer_entity.
For now only the drm_sched_entity init/destroy function calls are moved
here, but as the entities gain new members this will keep the code simpler.
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 73 +++++++++++++------------
1 file changed, 38 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5850a013e65e..8b38b5ed9a9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2013,10 +2013,18 @@ static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
}
static int amdgpu_ttm_buffer_entity_init(struct amdgpu_ttm_buffer_entity *entity,
+ enum drm_sched_priority prio,
+ struct drm_gpu_scheduler **scheds,
+ int num_schedulers,
int starting_gart_window,
u32 num_gart_windows)
{
- int i;
+ int i, r;
+
+ r = drm_sched_entity_init(&entity->base, prio, scheds, num_schedulers, NULL);
+ if (r)
+ return r;
+
mutex_init(&entity->lock);
@@ -2033,6 +2041,11 @@ static int amdgpu_ttm_buffer_entity_init(struct amdgpu_ttm_buffer_entity *entity
return starting_gart_window;
}
+static void amdgpu_ttm_buffer_entity_fini(struct amdgpu_ttm_buffer_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -2317,7 +2330,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- u32 used_windows;
uint64_t size;
int r;
@@ -2331,47 +2343,36 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
- r = drm_sched_entity_init(&adev->mman.default_entity.base,
- DRM_SCHED_PRIORITY_KERNEL, &sched,
- 1, NULL);
- if (r) {
+ r = amdgpu_ttm_buffer_entity_init(&adev->mman.default_entity,
+ DRM_SCHED_PRIORITY_KERNEL, &sched, 1,
+ 0, 0);
+ if (r < 0) {
dev_err(adev->dev,
- "Failed setting up TTM BO move entity (%d)\n",
- r);
+ "Failed setting up TTM entity (%d)\n", r);
return;
}
- r = drm_sched_entity_init(&adev->mman.clear_entity.base,
- DRM_SCHED_PRIORITY_NORMAL, &sched,
- 1, NULL);
- if (r) {
+ r = amdgpu_ttm_buffer_entity_init(&adev->mman.clear_entity,
+ DRM_SCHED_PRIORITY_NORMAL, &sched, 1,
+ r, 1);
+ if (r < 0) {
dev_err(adev->dev,
- "Failed setting up TTM BO clear entity (%d)\n",
- r);
- goto error_free_entity;
+ "Failed setting up TTM BO clear entity (%d)\n", r);
+ goto error_free_default_entity;
}
- r = drm_sched_entity_init(&adev->mman.move_entity.base,
- DRM_SCHED_PRIORITY_NORMAL, &sched,
- 1, NULL);
- if (r) {
+ r = amdgpu_ttm_buffer_entity_init(&adev->mman.move_entity,
+ DRM_SCHED_PRIORITY_NORMAL, &sched, 1,
+ r, 2);
+ if (r < 0) {
dev_err(adev->dev,
- "Failed setting up TTM BO move entity (%d)\n",
- r);
- drm_sched_entity_destroy(&adev->mman.clear_entity.base);
- goto error_free_entity;
+ "Failed setting up TTM BO move entity (%d)\n", r);
+ goto error_free_clear_entity;
}
-
- /* Statically assign GART windows to each entity. */
- used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.default_entity, 0, 0);
- used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.move_entity,
- used_windows, 2);
- used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.clear_entity,
- used_windows, 1);
} else {
- drm_sched_entity_destroy(&adev->mman.default_entity.base);
- drm_sched_entity_destroy(&adev->mman.clear_entity.base);
- drm_sched_entity_destroy(&adev->mman.move_entity.base);
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.default_entity);
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.clear_entity);
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.move_entity);
/* Drop all the old fences since re-creating the scheduler entities
* will allocate new contexts.
*/
@@ -2388,8 +2389,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
-error_free_entity:
- drm_sched_entity_destroy(&adev->mman.default_entity.base);
+error_free_clear_entity:
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.clear_entity);
+error_free_default_entity:
+ amdgpu_ttm_buffer_entity_fini(&adev->mman.default_entity);
}
static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
--
2.43.0
On 1/22/26 18:02, Pierre-Eric Pelloux-Prayer wrote:
> This allows to have init/fini functions to hold all the init and
> teardown code for amdgpu_ttm_buffer_entity.
> For now only drm_sched_entity init/destroy function calls are moved
> here, but as entities gain new members it will make code simpler.
>
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 73 +++++++++++++------------
> 1 file changed, 38 insertions(+), 35 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 5850a013e65e..8b38b5ed9a9c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -2013,10 +2013,18 @@ static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
> }
>
> static int amdgpu_ttm_buffer_entity_init(struct amdgpu_ttm_buffer_entity *entity,
> + enum drm_sched_priority prio,
> + struct drm_gpu_scheduler **scheds,
> + int num_schedulers,
> int starting_gart_window,
> u32 num_gart_windows)
> {
> - int i;
> + int i, r;
> +
> + r = drm_sched_entity_init(&entity->base, prio, scheds, num_schedulers, NULL);
> + if (r)
> + return r;
> +
>
> mutex_init(&entity->lock);
>
> @@ -2033,6 +2041,11 @@ static int amdgpu_ttm_buffer_entity_init(struct amdgpu_ttm_buffer_entity *entity
> return starting_gart_window;
> }
>
> +static void amdgpu_ttm_buffer_entity_fini(struct amdgpu_ttm_buffer_entity *entity)
> +{
> + drm_sched_entity_destroy(&entity->base);
> +}
> +
> /*
> * amdgpu_ttm_init - Init the memory management (ttm) as well as various
> * gtt/vram related fields.
> @@ -2317,7 +2330,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
> void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
> {
> struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
> - u32 used_windows;
> uint64_t size;
> int r;
>
> @@ -2331,47 +2343,36 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>
> ring = adev->mman.buffer_funcs_ring;
> sched = &ring->sched;
> - r = drm_sched_entity_init(&adev->mman.default_entity.base,
> - DRM_SCHED_PRIORITY_KERNEL, &sched,
> - 1, NULL);
> - if (r) {
> + r = amdgpu_ttm_buffer_entity_init(&adev->mman.default_entity,
> + DRM_SCHED_PRIORITY_KERNEL, &sched, 1,
> + 0, 0);
> + if (r < 0) {
> dev_err(adev->dev,
> - "Failed setting up TTM BO move entity (%d)\n",
> - r);
> + "Failed setting up TTM entity (%d)\n", r);
> return;
> }
>
> - r = drm_sched_entity_init(&adev->mman.clear_entity.base,
> - DRM_SCHED_PRIORITY_NORMAL, &sched,
> - 1, NULL);
> - if (r) {
> + r = amdgpu_ttm_buffer_entity_init(&adev->mman.clear_entity,
> + DRM_SCHED_PRIORITY_NORMAL, &sched, 1,
> + r, 1);
> + if (r < 0) {
> dev_err(adev->dev,
> - "Failed setting up TTM BO clear entity (%d)\n",
> - r);
> - goto error_free_entity;
> + "Failed setting up TTM BO clear entity (%d)\n", r);
> + goto error_free_default_entity;
> }
>
> - r = drm_sched_entity_init(&adev->mman.move_entity.base,
> - DRM_SCHED_PRIORITY_NORMAL, &sched,
> - 1, NULL);
> - if (r) {
> + r = amdgpu_ttm_buffer_entity_init(&adev->mman.move_entity,
> + DRM_SCHED_PRIORITY_NORMAL, &sched, 1,
> + r, 2);
> + if (r < 0) {
> dev_err(adev->dev,
> - "Failed setting up TTM BO move entity (%d)\n",
> - r);
> - drm_sched_entity_destroy(&adev->mman.clear_entity.base);
> - goto error_free_entity;
> + "Failed setting up TTM BO move entity (%d)\n", r);
> + goto error_free_clear_entity;
> }
> -
> - /* Statically assign GART windows to each entity. */
> - used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.default_entity, 0, 0);
> - used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.move_entity,
> - used_windows, 2);
> - used_windows = amdgpu_ttm_buffer_entity_init(&adev->mman.clear_entity,
> - used_windows, 1);
> } else {
> - drm_sched_entity_destroy(&adev->mman.default_entity.base);
> - drm_sched_entity_destroy(&adev->mman.clear_entity.base);
> - drm_sched_entity_destroy(&adev->mman.move_entity.base);
> + amdgpu_ttm_buffer_entity_fini(&adev->mman.default_entity);
> + amdgpu_ttm_buffer_entity_fini(&adev->mman.clear_entity);
> + amdgpu_ttm_buffer_entity_fini(&adev->mman.move_entity);
> /* Drop all the old fences since re-creating the scheduler entities
> * will allocate new contexts.
> */
> @@ -2388,8 +2389,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>
> return;
>
> -error_free_entity:
> - drm_sched_entity_destroy(&adev->mman.default_entity.base);
> +error_free_clear_entity:
> + amdgpu_ttm_buffer_entity_fini(&adev->mman.clear_entity);
> +error_free_default_entity:
> + amdgpu_ttm_buffer_entity_fini(&adev->mman.default_entity);
> }
>
> static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
© 2016 - 2026 Red Hat, Inc.