[PATCH v2 21/26] drm/xe/migrate: Add function to copy VRAM data in chunks

[PATCH v2 21/26] drm/xe/migrate: Add function to copy VRAM data in chunks
Posted by Michał Winiarski 3 months, 2 weeks ago
From: Lukasz Laguna <lukasz.laguna@intel.com>

Introduce a new function to copy data between VRAM and sysmem objects.
The existing xe_migrate_copy() is tailored for eviction and restore
operations, which involve additional logic and operate on entire
objects.
xe_migrate_vram_copy_chunk() allows copying chunks of data to or from
a dedicated buffer object, which is essential in the case of VF
migration.

Signed-off-by: Lukasz Laguna <lukasz.laguna@intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 drivers/gpu/drm/xe/xe_migrate.c | 134 ++++++++++++++++++++++++++++++--
 drivers/gpu/drm/xe/xe_migrate.h |   8 ++
 2 files changed, 136 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 3112c966c67d7..d30675707162b 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -514,7 +514,7 @@ int xe_migrate_init(struct xe_migrate *m)
 
 static u64 max_mem_transfer_per_pass(struct xe_device *xe)
 {
-	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
+	if ((!IS_DGFX(xe) || IS_SRIOV_PF(xe)) && xe_device_has_flat_ccs(xe))
 		return MAX_CCS_LIMITED_TRANSFER;
 
 	return MAX_PREEMPTDISABLE_TRANSFER;
@@ -1155,6 +1155,133 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
 	return migrate->q;
 }
 
+/**
+ * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
+ * @vram_bo: The VRAM buffer object.
+ * @vram_offset: The VRAM offset.
+ * @sysmem_bo: The sysmem buffer object.
+ * @sysmem_offset: The sysmem offset.
+ * @size: The size of VRAM chunk to copy.
+ * @dir: The direction of the copy operation.
+ *
+ * Copies a portion of a buffer object between VRAM and system memory.
+ * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
+ * copying to system memory.
+ *
+ * Return: Pointer to a dma_fence representing the last copy batch, or
+ * an error pointer on failure. If there is a failure, any copy operation
+ * started by the function call has been synced.
+ */
+struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
+					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
+					     u64 size, enum xe_migrate_copy_dir dir)
+{
+	struct xe_device *xe = xe_bo_device(vram_bo);
+	struct xe_tile *tile = vram_bo->tile;
+	struct xe_gt *gt = tile->primary_gt;
+	struct xe_migrate *m = tile->migrate;
+	struct dma_fence *fence = NULL;
+	struct ttm_resource *vram = vram_bo->ttm.resource;
+	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
+	struct xe_res_cursor vram_it, sysmem_it;
+	u64 vram_L0_ofs, sysmem_L0_ofs;
+	u32 vram_L0_pt, sysmem_L0_pt;
+	u64 vram_L0, sysmem_L0;
+	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
+	bool use_comp_pat = to_sysmem &&
+		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
+	int pass = 0;
+	int err;
+
+	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
+	xe_assert(xe, xe_bo_is_vram(vram_bo));
+	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
+	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
+	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
+
+	xe_res_first(vram, vram_offset, size, &vram_it);
+	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
+
+	while (size) {
+		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
+		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+		struct xe_sched_job *job;
+		struct xe_bb *bb;
+		u32 update_idx;
+		bool usm = xe->info.has_usm;
+		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
+		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
+
+		drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, vram_L0);
+
+		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
+					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
+
+		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
+					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
+		batch_size += EMIT_COPY_DW;
+
+		bb = xe_bb_new(gt, batch_size, usm);
+		if (IS_ERR(bb)) {
+			err = PTR_ERR(bb);
+			return ERR_PTR(err);
+		}
+
+		if (xe_migrate_allow_identity(vram_L0, &vram_it))
+			xe_res_next(&vram_it, vram_L0);
+		else
+			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
+
+		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
+
+		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+		update_idx = bb->len;
+
+		if (to_sysmem)
+			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
+		else
+			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
+
+		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
+						 update_idx);
+		if (IS_ERR(job)) {
+			err = PTR_ERR(job);
+			goto err;
+		}
+
+		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
+		WARN_ON_ONCE(!dma_resv_test_signaled(vram_bo->ttm.base.resv,
+						     DMA_RESV_USAGE_BOOKKEEP));
+		WARN_ON_ONCE(!dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
+						     DMA_RESV_USAGE_BOOKKEEP));
+
+		mutex_lock(&m->job_mutex);
+		xe_sched_job_arm(job);
+		dma_fence_put(fence);
+		fence = dma_fence_get(&job->drm.s_fence->finished);
+		xe_sched_job_push(job);
+
+		dma_fence_put(m->fence);
+		m->fence = dma_fence_get(fence);
+		mutex_unlock(&m->job_mutex);
+
+		xe_bb_free(bb, fence);
+		size -= vram_L0;
+		continue;
+
+err:
+		xe_bb_free(bb, NULL);
+
+		return ERR_PTR(err);
+	}
+
+	return fence;
+}
+
 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
 				 u32 size, u32 pitch)
 {
@@ -1852,11 +1979,6 @@ static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
 	return true;
 }
 
-enum xe_migrate_copy_dir {
-	XE_MIGRATE_COPY_TO_VRAM,
-	XE_MIGRATE_COPY_TO_SRAM,
-};
-
 #define XE_CACHELINE_BYTES	64ull
 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
 
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 4fad324b62535..d7bcc6ad8464e 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -28,6 +28,11 @@ struct xe_vma;
 
 enum xe_sriov_vf_ccs_rw_ctxs;
 
+enum xe_migrate_copy_dir {
+	XE_MIGRATE_COPY_TO_VRAM,
+	XE_MIGRATE_COPY_TO_SRAM,
+};
+
 /**
  * struct xe_migrate_pt_update_ops - Callbacks for the
  * xe_migrate_update_pgtables() function.
@@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
 
 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
+struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
+					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
+					     u64 size, enum xe_migrate_copy_dir dir);
 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
 			     unsigned long offset, void *buf, int len,
 			     int write);
-- 
2.50.1

Re: [PATCH v2 21/26] drm/xe/migrate: Add function to copy VRAM data in chunks
Posted by Michal Wajdeczko 3 months, 2 weeks ago

On 10/22/2025 12:41 AM, Michał Winiarski wrote:
> From: Lukasz Laguna <lukasz.laguna@intel.com>
> 
> Introduce a new function to copy data between VRAM and sysmem objects.
> The existing xe_migrate_copy() is tailored for eviction and restore
> operations, which involves additional logic and operates on entire
> objects.
> The xe_migrate_vram_copy_chunk() allows copying chunks of data to or
> from a dedicated buffer object, which is essential in case of VF
> migration.
> 
> Signed-off-by: Lukasz Laguna <lukasz.laguna@intel.com>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_migrate.c | 134 ++++++++++++++++++++++++++++++--
>  drivers/gpu/drm/xe/xe_migrate.h |   8 ++
>  2 files changed, 136 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index 3112c966c67d7..d30675707162b 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -514,7 +514,7 @@ int xe_migrate_init(struct xe_migrate *m)
>  
>  static u64 max_mem_transfer_per_pass(struct xe_device *xe)
>  {
> -	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
> +	if ((!IS_DGFX(xe) || IS_SRIOV_PF(xe)) && xe_device_has_flat_ccs(xe))

being a PF is a permanent condition, while your expected usage is only during the handling of VF migration.

maybe it would be better to introduce a FORCE_CCS_LIMITED_TRANSFER flag and pass it to the migration calls only when really needed?
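
e.g. something along these lines (just a sketch; the flag name and the
extra parameter are made up here for illustration, and existing
callers would simply pass 0):

	#define XE_MIGRATE_FORCE_CCS_LIMITED_TRANSFER	BIT(0)

	static u64 max_mem_transfer_per_pass(struct xe_device *xe, u32 flags)
	{
		if ((flags & XE_MIGRATE_FORCE_CCS_LIMITED_TRANSFER) ||
		    (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe)))
			return MAX_CCS_LIMITED_TRANSFER;

		return MAX_PREEMPTDISABLE_TRANSFER;
	}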


>  		return MAX_CCS_LIMITED_TRANSFER;
>  
>  	return MAX_PREEMPTDISABLE_TRANSFER;
> @@ -1155,6 +1155,133 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
>  	return migrate->q;
>  }
>  
> +/**
> + * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
> + * @vram_bo: The VRAM buffer object.
> + * @vram_offset: The VRAM offset.
> + * @sysmem_bo: The sysmem buffer object.
> + * @sysmem_offset: The sysmem offset.
> + * @size: The size of VRAM chunk to copy.
> + * @dir: The direction of the copy operation.
> + *
> + * Copies a portion of a buffer object between VRAM and system memory.
> + * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
> + * copying to system memory.
> + *
> + * Return: Pointer to a dma_fence representing the last copy batch, or
> + * an error pointer on failure. If there is a failure, any copy operation
> + * started by the function call has been synced.
> + */
> +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
> +					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
> +					     u64 size, enum xe_migrate_copy_dir dir)
> +{
> +	struct xe_device *xe = xe_bo_device(vram_bo);
> +	struct xe_tile *tile = vram_bo->tile;
> +	struct xe_gt *gt = tile->primary_gt;
> +	struct xe_migrate *m = tile->migrate;
> +	struct dma_fence *fence = NULL;
> +	struct ttm_resource *vram = vram_bo->ttm.resource;
> +	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
> +	struct xe_res_cursor vram_it, sysmem_it;
> +	u64 vram_L0_ofs, sysmem_L0_ofs;
> +	u32 vram_L0_pt, sysmem_L0_pt;
> +	u64 vram_L0, sysmem_L0;
> +	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
> +	bool use_comp_pat = to_sysmem &&
> +		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
> +	int pass = 0;
> +	int err;
> +
> +	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
> +	xe_assert(xe, xe_bo_is_vram(vram_bo));
> +	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
> +	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
> +	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
> +
> +	xe_res_first(vram, vram_offset, size, &vram_it);
> +	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
> +
> +	while (size) {
> +		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
> +		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
> +		struct xe_sched_job *job;
> +		struct xe_bb *bb;
> +		u32 update_idx;
> +		bool usm = xe->info.has_usm;
> +		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
> +
> +		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
> +		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
> +
> +		drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, vram_L0);

nit: there is xe_dbg()
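
i.e. something like (assuming xe_dbg() takes the xe device as its
first argument, like the other xe_*() print helpers):

		xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);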

> +
> +		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
> +		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
> +					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
> +
> +		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
> +					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
> +		batch_size += EMIT_COPY_DW;
> +
> +		bb = xe_bb_new(gt, batch_size, usm);
> +		if (IS_ERR(bb)) {
> +			err = PTR_ERR(bb);
> +			return ERR_PTR(err);
> +		}
> +
> +		if (xe_migrate_allow_identity(vram_L0, &vram_it))
> +			xe_res_next(&vram_it, vram_L0);
> +		else
> +			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
> +
> +		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
> +
> +		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
> +		update_idx = bb->len;
> +
> +		if (to_sysmem)
> +			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
> +		else
> +			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
> +
> +		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
> +						 update_idx);
> +		if (IS_ERR(job)) {
> +			err = PTR_ERR(job);
> +			goto err;

this goto inside the 'while' loop is weird
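
since the error path only frees the bb and returns, the label could be
dropped entirely, e.g. (a sketch; the trailing "continue" and "err:"
block would then go away too):

		if (IS_ERR(job)) {
			xe_bb_free(bb, NULL);
			return ERR_CAST(job);
		}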

> +		}
> +
> +		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
> +
> +		WARN_ON_ONCE(!dma_resv_test_signaled(vram_bo->ttm.base.resv,
> +						     DMA_RESV_USAGE_BOOKKEEP));
> +		WARN_ON_ONCE(!dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
> +						     DMA_RESV_USAGE_BOOKKEEP));

xe_WARN_ON_ONCE()?

but why not use asserts() if we are sure that this shouldn't happen?
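
e.g. (a sketch, mirroring the xe_assert() calls already used at the
top of the function):

		xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
						     DMA_RESV_USAGE_BOOKKEEP));
		xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
						     DMA_RESV_USAGE_BOOKKEEP));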

> +
> +		mutex_lock(&m->job_mutex);

scoped_guard(mutex)?
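
e.g. (a sketch, using scoped_guard() from <linux/cleanup.h>):

		scoped_guard(mutex, &m->job_mutex) {
			xe_sched_job_arm(job);
			dma_fence_put(fence);
			fence = dma_fence_get(&job->drm.s_fence->finished);
			xe_sched_job_push(job);

			dma_fence_put(m->fence);
			m->fence = dma_fence_get(fence);
		}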

> +		xe_sched_job_arm(job);
> +		dma_fence_put(fence);
> +		fence = dma_fence_get(&job->drm.s_fence->finished);
> +		xe_sched_job_push(job);
> +
> +		dma_fence_put(m->fence);
> +		m->fence = dma_fence_get(fence);

> +		mutex_unlock(&m->job_mutex);
> +
> +		xe_bb_free(bb, fence);
> +		size -= vram_L0;
> +		continue;
> +
> +err:
> +		xe_bb_free(bb, NULL);
> +
> +		return ERR_PTR(err);
> +	}
> +
> +	return fence;
> +}
> +
>  static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
>  				 u32 size, u32 pitch)
>  {
> @@ -1852,11 +1979,6 @@ static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
>  	return true;
>  }
>  
> -enum xe_migrate_copy_dir {
> -	XE_MIGRATE_COPY_TO_VRAM,
> -	XE_MIGRATE_COPY_TO_SRAM,
> -};
> -
>  #define XE_CACHELINE_BYTES	64ull
>  #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
>  
> diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
> index 4fad324b62535..d7bcc6ad8464e 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.h
> +++ b/drivers/gpu/drm/xe/xe_migrate.h
> @@ -28,6 +28,11 @@ struct xe_vma;
>  
>  enum xe_sriov_vf_ccs_rw_ctxs;
>  
> +enum xe_migrate_copy_dir {
> +	XE_MIGRATE_COPY_TO_VRAM,
> +	XE_MIGRATE_COPY_TO_SRAM,
> +};

nit: it's time for xe_migrate_types.h ;)

but not as part of this series
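
for reference, such a header would look roughly like this (a sketch
only, not something for this series):

	/* SPDX-License-Identifier: MIT */
	#ifndef _XE_MIGRATE_TYPES_H_
	#define _XE_MIGRATE_TYPES_H_

	enum xe_migrate_copy_dir {
		XE_MIGRATE_COPY_TO_VRAM,
		XE_MIGRATE_COPY_TO_SRAM,
	};

	#endif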

> +
>  /**
>   * struct xe_migrate_pt_update_ops - Callbacks for the
>   * xe_migrate_update_pgtables() function.
> @@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
>  
>  struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
>  struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
> +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
> +					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
> +					     u64 size, enum xe_migrate_copy_dir dir);
>  int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
>  			     unsigned long offset, void *buf, int len,
>  			     int write);

Re: [PATCH v2 21/26] drm/xe/migrate: Add function to copy VRAM data in chunks
Posted by Laguna, Lukasz 3 months, 1 week ago
On 10/23/2025 21:29, Michal Wajdeczko wrote:
>
> On 10/22/2025 12:41 AM, Michał Winiarski wrote:
>> From: Lukasz Laguna <lukasz.laguna@intel.com>
>>
>> Introduce a new function to copy data between VRAM and sysmem objects.
>> The existing xe_migrate_copy() is tailored for eviction and restore
>> operations, which involves additional logic and operates on entire
>> objects.
>> The xe_migrate_vram_copy_chunk() allows copying chunks of data to or
>> from a dedicated buffer object, which is essential in case of VF
>> migration.
>>
>> Signed-off-by: Lukasz Laguna <lukasz.laguna@intel.com>
>> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_migrate.c | 134 ++++++++++++++++++++++++++++++--
>>   drivers/gpu/drm/xe/xe_migrate.h |   8 ++
>>   2 files changed, 136 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
>> index 3112c966c67d7..d30675707162b 100644
>> --- a/drivers/gpu/drm/xe/xe_migrate.c
>> +++ b/drivers/gpu/drm/xe/xe_migrate.c
>> @@ -514,7 +514,7 @@ int xe_migrate_init(struct xe_migrate *m)
>>   
>>   static u64 max_mem_transfer_per_pass(struct xe_device *xe)
>>   {
>> -	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
>> +	if ((!IS_DGFX(xe) || IS_SRIOV_PF(xe)) && xe_device_has_flat_ccs(xe))
> being a PF is permanent case, while your expected usage is only during of the handling of the VF migration.
>
> maybe it would be better to introduce flag FORCE_CCS_LIMITED_TRANSFER and pass it to the migration calls when really needed ?

I don't think this change is necessary anymore since we removed support 
for raw CCS copy. I'll revert these updates. I tested the copy with 8M 
blocks on BMG, and it worked fine.

>
>>   		return MAX_CCS_LIMITED_TRANSFER;
>>   
>>   	return MAX_PREEMPTDISABLE_TRANSFER;
>> @@ -1155,6 +1155,133 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
>>   	return migrate->q;
>>   }
>>   
>> +/**
>> + * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
>> + * @vram_bo: The VRAM buffer object.
>> + * @vram_offset: The VRAM offset.
>> + * @sysmem_bo: The sysmem buffer object.
>> + * @sysmem_offset: The sysmem offset.
>> + * @size: The size of VRAM chunk to copy.
>> + * @dir: The direction of the copy operation.
>> + *
>> + * Copies a portion of a buffer object between VRAM and system memory.
>> + * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
>> + * copying to system memory.
>> + *
>> + * Return: Pointer to a dma_fence representing the last copy batch, or
>> + * an error pointer on failure. If there is a failure, any copy operation
>> + * started by the function call has been synced.
>> + */
>> +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
>> +					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
>> +					     u64 size, enum xe_migrate_copy_dir dir)
>> +{
>> +	struct xe_device *xe = xe_bo_device(vram_bo);
>> +	struct xe_tile *tile = vram_bo->tile;
>> +	struct xe_gt *gt = tile->primary_gt;
>> +	struct xe_migrate *m = tile->migrate;
>> +	struct dma_fence *fence = NULL;
>> +	struct ttm_resource *vram = vram_bo->ttm.resource;
>> +	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
>> +	struct xe_res_cursor vram_it, sysmem_it;
>> +	u64 vram_L0_ofs, sysmem_L0_ofs;
>> +	u32 vram_L0_pt, sysmem_L0_pt;
>> +	u64 vram_L0, sysmem_L0;
>> +	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
>> +	bool use_comp_pat = to_sysmem &&
>> +		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
>> +	int pass = 0;
>> +	int err;
>> +
>> +	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
>> +	xe_assert(xe, xe_bo_is_vram(vram_bo));
>> +	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
>> +	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
>> +	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
>> +
>> +	xe_res_first(vram, vram_offset, size, &vram_it);
>> +	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
>> +
>> +	while (size) {
>> +		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
>> +		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
>> +		struct xe_sched_job *job;
>> +		struct xe_bb *bb;
>> +		u32 update_idx;
>> +		bool usm = xe->info.has_usm;
>> +		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
>> +
>> +		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
>> +		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
>> +
>> +		drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, vram_L0);
> nit: there is xe_dbg()

Ok

>
>> +
>> +		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
>> +		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
>> +					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
>> +
>> +		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
>> +					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
>> +		batch_size += EMIT_COPY_DW;
>> +
>> +		bb = xe_bb_new(gt, batch_size, usm);
>> +		if (IS_ERR(bb)) {
>> +			err = PTR_ERR(bb);
>> +			return ERR_PTR(err);
>> +		}
>> +
>> +		if (xe_migrate_allow_identity(vram_L0, &vram_it))
>> +			xe_res_next(&vram_it, vram_L0);
>> +		else
>> +			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
>> +
>> +		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
>> +
>> +		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
>> +		update_idx = bb->len;
>> +
>> +		if (to_sysmem)
>> +			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
>> +		else
>> +			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
>> +
>> +		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
>> +						 update_idx);
>> +		if (IS_ERR(job)) {
>> +			err = PTR_ERR(job);
>> +			goto err;
> this goto inside 'while' loop is weird

Good point

>
>> +		}
>> +
>> +		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
>> +
>> +		WARN_ON_ONCE(!dma_resv_test_signaled(vram_bo->ttm.base.resv,
>> +						     DMA_RESV_USAGE_BOOKKEEP));
>> +		WARN_ON_ONCE(!dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
>> +						     DMA_RESV_USAGE_BOOKKEEP));
> xe_WARN_ON_ONCE() ?
>
> but why do not use asserts() if we are sure that this shouldn't happen ?

Ok, I'll use asserts

>
>> +
>> +		mutex_lock(&m->job_mutex);
> scoped_guard(mutex)?

Ok

>
>> +		xe_sched_job_arm(job);
>> +		dma_fence_put(fence);
>> +		fence = dma_fence_get(&job->drm.s_fence->finished);
>> +		xe_sched_job_push(job);
>> +
>> +		dma_fence_put(m->fence);
>> +		m->fence = dma_fence_get(fence);
>> +		mutex_unlock(&m->job_mutex);
>> +
>> +		xe_bb_free(bb, fence);
>> +		size -= vram_L0;
>> +		continue;
>> +
>> +err:
>> +		xe_bb_free(bb, NULL);
>> +
>> +		return ERR_PTR(err);
>> +	}
>> +
>> +	return fence;
>> +}
>> +
>>   static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
>>   				 u32 size, u32 pitch)
>>   {
>> @@ -1852,11 +1979,6 @@ static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
>>   	return true;
>>   }
>>   
>> -enum xe_migrate_copy_dir {
>> -	XE_MIGRATE_COPY_TO_VRAM,
>> -	XE_MIGRATE_COPY_TO_SRAM,
>> -};
>> -
>>   #define XE_CACHELINE_BYTES	64ull
>>   #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)
>>   
>> diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
>> index 4fad324b62535..d7bcc6ad8464e 100644
>> --- a/drivers/gpu/drm/xe/xe_migrate.h
>> +++ b/drivers/gpu/drm/xe/xe_migrate.h
>> @@ -28,6 +28,11 @@ struct xe_vma;
>>   
>>   enum xe_sriov_vf_ccs_rw_ctxs;
>>   
>> +enum xe_migrate_copy_dir {
>> +	XE_MIGRATE_COPY_TO_VRAM,
>> +	XE_MIGRATE_COPY_TO_SRAM,
>> +};
> nit: it's time for xe_migrate_types.h ;)
>
> but not as part of this series
>
>> +
>>   /**
>>    * struct xe_migrate_pt_update_ops - Callbacks for the
>>    * xe_migrate_update_pgtables() function.
>> @@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
>>   
>>   struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
>>   struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
>> +struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
>> +					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
>> +					     u64 size, enum xe_migrate_copy_dir dir);
>>   int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
>>   			     unsigned long offset, void *buf, int len,
>>   			     int write);