Currently both device and IOMMU dirty tracking are supported; device dirty
tracking is preferred.
Add the framework code in iommufd_cdev_unmap() to choose either device or
iommu dirty tracking, just like vfio_legacy_dma_unmap_one().
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Tested-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Tested-by: Rohith S R <rohith.s.r@intel.com>
---
hw/vfio/iommufd.c | 34 +++++++++++++++++++++++++++++++---
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
index bb5775aa71..806ca6ef14 100644
--- a/hw/vfio/iommufd.c
+++ b/hw/vfio/iommufd.c
@@ -61,14 +61,42 @@ static int iommufd_cdev_unmap(const VFIOContainer *bcontainer,
IOMMUTLBEntry *iotlb, bool unmap_all)
{
const VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer);
+ IOMMUFDBackend *be = container->be;
+ uint32_t ioas_id = container->ioas_id;
+ bool need_dirty_sync = false;
+ Error *local_err = NULL;
+ int ret;
if (unmap_all) {
size = UINT64_MAX;
}
- /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */
- return iommufd_backend_unmap_dma(container->be,
- container->ioas_id, iova, size);
+ if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) {
+ if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) &&
+ bcontainer->dirty_pages_supported) {
+ /* TODO: query dirty bitmap before DMA unmap */
+ return iommufd_backend_unmap_dma(be, ioas_id, iova, size);
+ }
+
+ need_dirty_sync = true;
+ }
+
+ ret = iommufd_backend_unmap_dma(be, ioas_id, iova, size);
+ if (ret) {
+ return ret;
+ }
+
+ if (need_dirty_sync) {
+ ret = vfio_container_query_dirty_bitmap(bcontainer, iova, size,
+ iotlb->translated_addr,
+ &local_err);
+ if (ret) {
+ error_report_err(local_err);
+ return ret;
+ }
+ }
+
+ return 0;
}
static bool iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
--
2.47.1