[PATCH v3 08/28] drm/xe/pf: Add minimalistic migration descriptor

The descriptor reuses the KLV format used by GuC and contains metadata
that can be used to quickly fail migration when the source is incompatible
with the destination.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
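
For reference, here is a rough standalone sketch of how such a descriptor
KLV stream is laid out and walked. It assumes the usual GuC KLV encoding
behind PREP_GUC_KLV_CONST() and the GUC_KLV_0_* fields (key in bits 31:16
of the header dword, length in value dwords in bits 15:0, and
GUC_KLV_LEN_MIN == 1 header dword); the devid/revid numbers in it are
arbitrary example values, not real IDs:

#include <stdint.h>
#include <stdio.h>

/* Header dword layout assumed from the GuC KLV ABI used by the driver. */
#define KLV_HDR(key, len)	(((uint32_t)(key) << 16) | ((uint32_t)(len) & 0xffff))
#define KLV_KEY(hdr)		((hdr) >> 16)		/* GUC_KLV_0_KEY */
#define KLV_LEN(hdr)		((hdr) & 0xffff)	/* GUC_KLV_0_LEN */

int main(void)
{
	/* Stream shaped like the one pf_descriptor_init() builds (example values). */
	uint32_t desc[] = {
		KLV_HDR(0xf001, 1), 0x1234,	/* MIGRATION_KLV_DEVICE_DEVID */
		KLV_HDR(0xf002, 1), 0x04,	/* MIGRATION_KLV_DEVICE_REVID */
	};
	uint32_t n = sizeof(desc) / sizeof(desc[0]);
	uint32_t i = 0;

	/* Walk the stream the same way the processing loop does. */
	while (n - i >= 1) {			/* at least one header dword left */
		uint32_t key = KLV_KEY(desc[i]);
		uint32_t len = KLV_LEN(desc[i]);

		i += 1;				/* skip the header dword */
		if (len > n - i)
			break;			/* truncated KLV, bail out */

		printf("key=%#06x len=%u value[0]=%#x\n",
		       (unsigned int)key, (unsigned int)len,
		       (unsigned int)desc[i]);
		i += len;			/* unknown keys are simply skipped */
	}

	return 0;
}
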
 drivers/gpu/drm/xe/xe_sriov_migration_data.c | 89 +++++++++++++++++++-
 drivers/gpu/drm/xe/xe_sriov_migration_data.h |  2 +
 drivers/gpu/drm/xe/xe_sriov_pf_migration.c   |  6 ++
 3 files changed, 96 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_migration_data.c
index a3f50836adc81..18e17706772fc 100644
--- a/drivers/gpu/drm/xe/xe_sriov_migration_data.c
+++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.c
@@ -5,6 +5,7 @@
 
 #include "xe_bo.h"
 #include "xe_device.h"
+#include "xe_guc_klv_helpers.h"
 #include "xe_sriov_migration_data.h"
 #include "xe_sriov_pf_helpers.h"
 #include "xe_sriov_pf_migration.h"
@@ -383,11 +384,19 @@ ssize_t xe_sriov_migration_data_write(struct xe_device *xe, unsigned int vfid,
 	return produced;
 }
 
-#define MIGRATION_DESCRIPTOR_DWORDS 0
+#define MIGRATION_KLV_DEVICE_DEVID_KEY	0xf001u
+#define MIGRATION_KLV_DEVICE_DEVID_LEN	1u
+#define MIGRATION_KLV_DEVICE_REVID_KEY	0xf002u
+#define MIGRATION_KLV_DEVICE_REVID_LEN	1u
+
+#define MIGRATION_DESCRIPTOR_DWORDS	(GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_DEVID_LEN + \
+					 GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_REVID_LEN)
 static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid)
 {
 	struct xe_sriov_migration_data **desc = pf_pick_descriptor(xe, vfid);
 	struct xe_sriov_migration_data *data;
+	unsigned int len = 0;
+	u32 *klvs;
 	int ret;
 
 	data = xe_sriov_migration_data_alloc(xe);
@@ -401,11 +410,89 @@ static size_t pf_descriptor_init(struct xe_device *xe, unsigned int vfid)
 		return ret;
 	}
 
+	klvs = data->vaddr;
+	klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_DEVID_KEY,
+					 MIGRATION_KLV_DEVICE_DEVID_LEN);
+	klvs[len++] = xe->info.devid;
+	klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_REVID_KEY,
+					 MIGRATION_KLV_DEVICE_REVID_LEN);
+	klvs[len++] = xe->info.revid;
+
+	xe_assert(xe, len == MIGRATION_DESCRIPTOR_DWORDS);
+
 	*desc = data;
 
 	return 0;
 }
 
+/**
+ * xe_sriov_migration_data_process_descriptor() - Process migration data descriptor.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @data: the &struct xe_sriov_migration_data containing the descriptor
+ *
+ * The descriptor uses the same KLV format as GuC, and contains metadata used for
+ * checking migration data compatibility.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int xe_sriov_migration_data_process_descriptor(struct xe_device *xe, unsigned int vfid,
+					       struct xe_sriov_migration_data *data)
+{
+	u32 num_dwords = data->size / sizeof(u32);
+	u32 *klvs = data->vaddr;
+
+	xe_assert(xe, data->type == XE_SRIOV_MIGRATION_DATA_TYPE_DESCRIPTOR);
+
+	if (data->size % sizeof(u32)) {
+		xe_sriov_warn(xe, "Aborting migration, descriptor not in KLV format (size=%llu)\n",
+			      data->size);
+		return -EINVAL;
+	}
+
+	while (num_dwords >= GUC_KLV_LEN_MIN) {
+		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+		klvs += GUC_KLV_LEN_MIN;
+		num_dwords -= GUC_KLV_LEN_MIN;
+
+		if (len > num_dwords)
+			return -EINVAL;
+
+		switch (key) {
+		case MIGRATION_KLV_DEVICE_DEVID_KEY:
+			if (*klvs != xe->info.devid) {
+				xe_sriov_warn(xe,
+					      "Aborting migration, devid mismatch %#06x!=%#06x\n",
+					      *klvs, xe->info.devid);
+				return -ENODEV;
+			}
+			break;
+		case MIGRATION_KLV_DEVICE_REVID_KEY:
+			if (*klvs != xe->info.revid) {
+				xe_sriov_warn(xe,
+					      "Aborting migration, revid mismatch %#06x!=%#06x\n",
+					      *klvs, xe->info.revid);
+				return -ENODEV;
+			}
+			break;
+		default:
+			xe_sriov_dbg(xe,
+				     "Skipping unknown migration descriptor key %#06x (len=%#06x)\n",
+				     key, len);
+			print_hex_dump_bytes("desc: ", DUMP_PREFIX_OFFSET, klvs,
+					     min(SZ_64, len * sizeof(u32)));
+			break;
+		}
+
+		klvs += len;
+		num_dwords -= len;
+	}
+
+	return 0;
+}
+
 static void pf_pending_init(struct xe_device *xe, unsigned int vfid)
 {
 	struct xe_sriov_migration_data **data = pf_pick_pending(xe, vfid);
diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_migration_data.h
index 7ec489c3f28d2..bb4ea5850e5c0 100644
--- a/drivers/gpu/drm/xe/xe_sriov_migration_data.h
+++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.h
@@ -30,6 +30,8 @@ ssize_t xe_sriov_migration_data_read(struct xe_device *xe, unsigned int vfid,
 				     char __user *buf, size_t len);
 ssize_t xe_sriov_migration_data_write(struct xe_device *xe, unsigned int vfid,
 				      const char __user *buf, size_t len);
+int xe_sriov_migration_data_process_descriptor(struct xe_device *xe, unsigned int vfid,
+					       struct xe_sriov_migration_data *data);
 int xe_sriov_migration_data_save_init(struct xe_device *xe, unsigned int vfid);
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
index 8ea531d36f53b..f0a0c2b027a20 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -175,9 +175,15 @@ xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid)
 static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid,
 				struct xe_sriov_migration_data *data)
 {
+	int ret;
+
 	if (data->tile != 0 || data->gt != 0)
 		return -EINVAL;
 
+	ret = xe_sriov_migration_data_process_descriptor(xe, vfid, data);
+	if (ret)
+		return ret;
+
 	xe_sriov_migration_data_free(data);
 
 	return 0;
-- 
2.50.1

Re: [PATCH v3 08/28] drm/xe/pf: Add minimalistic migration descriptor
Posted by Michal Wajdeczko

On 10/30/2025 9:31 PM, Michał Winiarski wrote:
[...]
> +	while (num_dwords >= GUC_KLV_LEN_MIN) {
> +		u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
> +		u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
> +
> +		klvs += GUC_KLV_LEN_MIN;
> +		num_dwords -= GUC_KLV_LEN_MIN;
> +
> +		if (len > num_dwords)

nit:
			xe_sriov_warn(xe, "Aborting migration, truncated KLV %#x, len %u\n",


> +			return -EINVAL;

[...]

just nit, so

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>