From: Weili Qian <qianweili@huawei.com>
If a device error occurs during live migration, qemu will
reset the VF. The VF reset and the device reset are then performed
simultaneously, and the VF reset will time out. Therefore, use the
QM_RESETTING flag to ensure that the VF reset and the device reset
are performed serially.
Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Weili Qian <qianweili@huawei.com>
---
.../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 24 +++++++++++++++++++
.../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 2 ++
2 files changed, 26 insertions(+)
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index fe2ffcd00d6e..d55365b21f78 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -1188,14 +1188,37 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
return 0;
}
+static void hisi_acc_vf_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+ struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
+ struct device *dev = &qm->pdev->dev;
+ u32 delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ msleep(1);
+ if (++delay > QM_RESET_WAIT_TIMEOUT) {
+ dev_err(dev, "reset prepare failed\n");
+ return;
+ }
+ }
+
+ hisi_acc_vdev->set_reset_flag = true;
+}
+
static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
+ struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
if (hisi_acc_vdev->core_device.vdev.migration_flags !=
VFIO_MIGRATION_STOP_COPY)
return;
+ if (hisi_acc_vdev->set_reset_flag)
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+
mutex_lock(&hisi_acc_vdev->state_mutex);
hisi_acc_vf_reset(hisi_acc_vdev);
mutex_unlock(&hisi_acc_vdev->state_mutex);
@@ -1746,6 +1769,7 @@ static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
+ .reset_prepare = hisi_acc_vf_pci_reset_prepare,
.reset_done = hisi_acc_vf_pci_aer_reset_done,
.error_detected = vfio_pci_core_aer_err_detected,
};
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index cd55eba64dfb..a3d91a31e3d8 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -27,6 +27,7 @@
#define ERROR_CHECK_TIMEOUT 100
#define CHECK_DELAY_TIME 100
+#define QM_RESET_WAIT_TIMEOUT 60000
#define QM_SQC_VFT_BASE_SHIFT_V2 28
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
@@ -128,6 +129,7 @@ struct hisi_acc_vf_migration_file {
struct hisi_acc_vf_core_device {
struct vfio_pci_core_device core_device;
u8 match_done;
+ bool set_reset_flag;
/*
* io_base is only valid when dev_opened is true,
* which is protected by open_mutex.
--
2.24.0

On Sun, 4 Jan 2026 15:07:03 +0800
Longfang Liu <liulongfang@huawei.com> wrote:
> From: Weili Qian <qianweili@huawei.com>
>
> If device error occurs during live migration, qemu will
> reset the VF. At this time, VF reset and device reset are performed
> simultaneously. The VF reset will timeout. Therefore, the QM_RESETTING
> flag is used to ensure that VF reset and device reset are performed
> serially.
>
> Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
> Signed-off-by: Weili Qian <qianweili@huawei.com>
> ---
> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 24 +++++++++++++++++++
> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 2 ++
> 2 files changed, 26 insertions(+)
>
> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> index fe2ffcd00d6e..d55365b21f78 100644
> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> @@ -1188,14 +1188,37 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
> return 0;
> }
>
> +static void hisi_acc_vf_pci_reset_prepare(struct pci_dev *pdev)
> +{
> + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
> + struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
> + struct device *dev = &qm->pdev->dev;
> + u32 delay = 0;
> +
> + /* All reset requests need to be queued for processing */
> + while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
> + msleep(1);
> + if (++delay > QM_RESET_WAIT_TIMEOUT) {
> + dev_err(dev, "reset prepare failed\n");
> + return;
> + }
> + }
> +
> + hisi_acc_vdev->set_reset_flag = true;
> +}
> +
> static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
> {
> struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
> + struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
>
> if (hisi_acc_vdev->core_device.vdev.migration_flags !=
> VFIO_MIGRATION_STOP_COPY)
> return;
>
> + if (hisi_acc_vdev->set_reset_flag)
> + clear_bit(QM_RESETTING, &qm->misc_ctl);
.reset_prepare sets QM_RESETTING unconditionally, .reset_done clears
QM_RESETTING conditionally based on the migration state. In 2/ this
becomes conditional on the device supporting migration ops. Doesn't
this enable a scenario where a device that does not support migration
puts QM_RESETTING into an inconsistent state that is never cleared?
Should the clear_bit() occur before the migration state/capability
check?
Thanks,
Alex
> +
> mutex_lock(&hisi_acc_vdev->state_mutex);
> hisi_acc_vf_reset(hisi_acc_vdev);
> mutex_unlock(&hisi_acc_vdev->state_mutex);
> @@ -1746,6 +1769,7 @@ static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
> MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
>
> static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
> + .reset_prepare = hisi_acc_vf_pci_reset_prepare,
> .reset_done = hisi_acc_vf_pci_aer_reset_done,
> .error_detected = vfio_pci_core_aer_err_detected,
> };
> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> index cd55eba64dfb..a3d91a31e3d8 100644
> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> @@ -27,6 +27,7 @@
>
> #define ERROR_CHECK_TIMEOUT 100
> #define CHECK_DELAY_TIME 100
> +#define QM_RESET_WAIT_TIMEOUT 60000
>
> #define QM_SQC_VFT_BASE_SHIFT_V2 28
> #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
> @@ -128,6 +129,7 @@ struct hisi_acc_vf_migration_file {
> struct hisi_acc_vf_core_device {
> struct vfio_pci_core_device core_device;
> u8 match_done;
> + bool set_reset_flag;
> /*
> * io_base is only valid when dev_opened is true,
> * which is protected by open_mutex.

On 2026/1/17 0:47, Alex Williamson wrote:
> On Sun, 4 Jan 2026 15:07:03 +0800
> Longfang Liu <liulongfang@huawei.com> wrote:
>
>> From: Weili Qian <qianweili@huawei.com>
>>
>> If device error occurs during live migration, qemu will
>> reset the VF. At this time, VF reset and device reset are performed
>> simultaneously. The VF reset will timeout. Therefore, the QM_RESETTING
>> flag is used to ensure that VF reset and device reset are performed
>> serially.
>>
>> Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
>> Signed-off-by: Weili Qian <qianweili@huawei.com>
>> ---
>> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 24 +++++++++++++++++++
>> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 2 ++
>> 2 files changed, 26 insertions(+)
>>
>> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> index fe2ffcd00d6e..d55365b21f78 100644
>> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> @@ -1188,14 +1188,37 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
>> return 0;
>> }
>>
>> +static void hisi_acc_vf_pci_reset_prepare(struct pci_dev *pdev)
>> +{
>> + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
>> + struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
>> + struct device *dev = &qm->pdev->dev;
>> + u32 delay = 0;
>> +
>> + /* All reset requests need to be queued for processing */
>> + while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
>> + msleep(1);
>> + if (++delay > QM_RESET_WAIT_TIMEOUT) {
>> + dev_err(dev, "reset prepare failed\n");
>> + return;
>> + }
>> + }
>> +
>> + hisi_acc_vdev->set_reset_flag = true;
>> +}
>> +
>> static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
>> {
>> struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
>> + struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
>>
>> if (hisi_acc_vdev->core_device.vdev.migration_flags !=
>> VFIO_MIGRATION_STOP_COPY)
>> return;
>>
>> + if (hisi_acc_vdev->set_reset_flag)
>> + clear_bit(QM_RESETTING, &qm->misc_ctl);
>
>
> .reset_prepare sets QM_RESETTING unconditionally, .reset_done clears
> QM_RESETTING conditionally based on the migration state. In 2/ this
> becomes conditional on the device supporting migration ops. Doesn't
> this enable a scenario where a device that does not support migration
> puts QM_RESETTING into an inconsistent state that is never cleared?
> Should the clear_bit() occur before the migration state/capability
> check?
>
Yes, it makes more sense to move clear_bit() before the migration state
or capability check.
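For example, something along these lines (an untested sketch against this
patch, reusing the names from the diff above):

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;

	/* Release the PF reset lock first so it is never left held. */
	if (hisi_acc_vdev->set_reset_flag) {
		clear_bit(QM_RESETTING, &qm->misc_ctl);
		hisi_acc_vdev->set_reset_flag = false;
	}

	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
	    VFIO_MIGRATION_STOP_COPY)
		return;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	hisi_acc_vf_reset(hisi_acc_vdev);
	mutex_unlock(&hisi_acc_vdev->state_mutex);
}

Clearing set_reset_flag here as well is part of the sketch, so a later
reset_done without a matching reset_prepare does not drop a QM_RESETTING
bit it does not own.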
Thanks,
Longfang.
> Thanks,
> Alex
>
>> +
>> mutex_lock(&hisi_acc_vdev->state_mutex);
>> hisi_acc_vf_reset(hisi_acc_vdev);
>> mutex_unlock(&hisi_acc_vdev->state_mutex);
>> @@ -1746,6 +1769,7 @@ static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
>> MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
>>
>> static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
>> + .reset_prepare = hisi_acc_vf_pci_reset_prepare,
>> .reset_done = hisi_acc_vf_pci_aer_reset_done,
>> .error_detected = vfio_pci_core_aer_err_detected,
>> };
>> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> index cd55eba64dfb..a3d91a31e3d8 100644
>> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> @@ -27,6 +27,7 @@
>>
>> #define ERROR_CHECK_TIMEOUT 100
>> #define CHECK_DELAY_TIME 100
>> +#define QM_RESET_WAIT_TIMEOUT 60000
>>
>> #define QM_SQC_VFT_BASE_SHIFT_V2 28
>> #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
>> @@ -128,6 +129,7 @@ struct hisi_acc_vf_migration_file {
>> struct hisi_acc_vf_core_device {
>> struct vfio_pci_core_device core_device;
>> u8 match_done;
>> + bool set_reset_flag;
>> /*
>> * io_base is only valid when dev_opened is true,
>> * which is protected by open_mutex.
>
> .
>