Make use of the new mmap_prepare functionality to perform an I/O remap in
favour of the deprecated f_op->mmap hook, hooking the success path to
correctly update the users refcount.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
drivers/iommu/iommufd/main.c | 47 ++++++++++++++++++++----------------
1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 15af7ced0501..b8b9c0e7520d 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -535,46 +535,51 @@ static const struct vm_operations_struct iommufd_vma_ops = {
.close = iommufd_fops_vma_close,
};
+static int iommufd_fops_mmap_success(const struct vm_area_struct *vma)
+{
+ struct iommufd_mmap *immap = vma->vm_private_data;
+
+ /* vm_ops.open won't be called for mmap itself. */
+ refcount_inc(&immap->owner->users);
+
+ return 0;
+}
+
/* The vm_pgoff must be pre-allocated from mt_mmap, and given to user space */
-static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
+static int iommufd_fops_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *filp = desc->file;
struct iommufd_ctx *ictx = filp->private_data;
- size_t length = vma->vm_end - vma->vm_start;
+ const size_t length = vma_desc_size(desc);
struct iommufd_mmap *immap;
- int rc;
if (!PAGE_ALIGNED(length))
return -EINVAL;
- if (!(vma->vm_flags & VM_SHARED))
+ if (!(desc->vm_flags & VM_SHARED))
return -EINVAL;
- if (vma->vm_flags & VM_EXEC)
+ if (desc->vm_flags & VM_EXEC)
return -EPERM;
- /* vma->vm_pgoff carries a page-shifted start position to an immap */
- immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
+ /* desc->pgoff carries a page-shifted start position to an immap */
+ immap = mtree_load(&ictx->mt_mmap, desc->pgoff << PAGE_SHIFT);
if (!immap)
return -ENXIO;
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap thing to be mapped
*/
- if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
+ if (desc->pgoff != immap->vm_pgoff || length != immap->length)
return -ENXIO;
- vma->vm_pgoff = 0;
- vma->vm_private_data = immap;
- vma->vm_ops = &iommufd_vma_ops;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ desc->pgoff = 0;
+ desc->private_data = immap;
+ desc->vm_ops = &iommufd_vma_ops;
+ desc->page_prot = pgprot_noncached(desc->page_prot);
- rc = io_remap_pfn_range(vma, vma->vm_start,
- immap->mmio_addr >> PAGE_SHIFT, length,
- vma->vm_page_prot);
- if (rc)
- return rc;
+ mmap_action_ioremap_full(desc, immap->mmio_addr >> PAGE_SHIFT);
+ desc->action.success_hook = iommufd_fops_mmap_success;
- /* vm_ops.open won't be called for mmap itself. */
- refcount_inc(&immap->owner->users);
- return rc;
+ return 0;
}
static const struct file_operations iommufd_fops = {
@@ -582,7 +587,7 @@ static const struct file_operations iommufd_fops = {
.open = iommufd_fops_open,
.release = iommufd_fops_release,
.unlocked_ioctl = iommufd_fops_ioctl,
- .mmap = iommufd_fops_mmap,
+ .mmap_prepare = iommufd_fops_mmap_prepare,
};
/**
--
2.51.0
On Tue, Sep 16, 2025 at 03:11:59PM +0100, Lorenzo Stoakes wrote:
> -static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
> +static int iommufd_fops_mmap_prepare(struct vm_area_desc *desc)
> {
> + struct file *filp = desc->file;
> struct iommufd_ctx *ictx = filp->private_data;
> - size_t length = vma->vm_end - vma->vm_start;
> + const size_t length = vma_desc_size(desc);
> struct iommufd_mmap *immap;
> - int rc;
>
> if (!PAGE_ALIGNED(length))
> return -EINVAL;
This is for sure redundant? I.e., vma_desc_size() is always a page
multiple? Let's drop it
> - if (!(vma->vm_flags & VM_SHARED))
> + if (!(desc->vm_flags & VM_SHARED))
> return -EINVAL;
This should use that no-COW helper David found
> - /* vma->vm_pgoff carries a page-shifted start position to an immap */
> - immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
> + /* desc->pgoff carries a page-shifted start position to an immap */
> + immap = mtree_load(&ictx->mt_mmap, desc->pgoff << PAGE_SHIFT);
> if (!immap)
> return -ENXIO;
> /*
> * mtree_load() returns the immap for any contained mmio_addr, so only
> * allow the exact immap thing to be mapped
> */
> - if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
> + if (desc->pgoff != immap->vm_pgoff || length != immap->length)
> return -ENXIO;
>
> - vma->vm_pgoff = 0;
I think this is an existing bug; I must have missed it when I reviewed
this. If we drop it, then the vma will naturally get the right pgoff?
> - vma->vm_private_data = immap;
> - vma->vm_ops = &iommufd_vma_ops;
> - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> + desc->pgoff = 0;
> + desc->private_data = immap;
> + desc->vm_ops = &iommufd_vma_ops;
> + desc->page_prot = pgprot_noncached(desc->page_prot);
>
> - rc = io_remap_pfn_range(vma, vma->vm_start,
> - immap->mmio_addr >> PAGE_SHIFT, length,
> - vma->vm_page_prot);
> - if (rc)
> - return rc;
> + mmap_action_ioremap_full(desc, immap->mmio_addr >> PAGE_SHIFT);
> + desc->action.success_hook = iommufd_fops_mmap_success;
>
> - /* vm_ops.open won't be called for mmap itself. */
> - refcount_inc(&immap->owner->users);
Ooh, this is a racy existing bug — I'm going to send a patch for it
right now. So success_hook won't work here.
@@ -551,15 +551,24 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
return -EPERM;
/* vma->vm_pgoff carries a page-shifted start position to an immap */
+ mtree_lock(&ictx->mt_mmap);
immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
- if (!immap)
+ if (!immap) {
+ mtree_unlock(&ictx->mt_mmap);
return -ENXIO;
+ }
+ /* vm_ops.open won't be called for mmap itself. */
+ refcount_inc(&immap->owner->users);
+ mtree_unlock(&ictx->mt_mmap);
+
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap thing to be mapped
*/
- if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
- return -ENXIO;
+ if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
+ rc = -ENXIO;
+ goto err_refcount;
+ }
vma->vm_pgoff = 0;
vma->vm_private_data = immap;
@@ -570,10 +579,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
immap->mmio_addr >> PAGE_SHIFT, length,
vma->vm_page_prot);
if (rc)
- return rc;
+ goto err_refcount;
+ return 0;
- /* vm_ops.open won't be called for mmap itself. */
- refcount_inc(&immap->owner->users);
+err_refcount:
+ refcount_dec(&immap->owner->users);
return rc;
}
Andrew - Jason has sent a conflicting patch against this file so it's not reasonable to include it in this series any more, please drop it. Sigh. Thanks, Lorenzo
On Tue, 16 Sep 2025 17:23:31 +0100 Lorenzo Stoakes <lorenzo.stoakes@oracle.com> wrote: > Andrew - Jason has sent a conflicting patch against this file so it's not > reasonable to include it in this series any more, please drop it. No probs. All added to mm-new, thanks. emails suppressed due to mercy.
On Tue, Sep 16, 2025 at 06:32:53PM -0700, Andrew Morton wrote: > On Tue, 16 Sep 2025 17:23:31 +0100 Lorenzo Stoakes <lorenzo.stoakes@oracle.com> wrote: > > > Andrew - Jason has sent a conflicting patch against this file so it's not > > reasonable to include it in this series any more, please drop it. > > No probs. > > All added to mm-new, thanks. emails suppressed due to mercy. Thanks, should have a new respin based on Jason's feedback today (with copious tags everywhere other than the bits I need to fixup so we should hopefully have this finalised very soon). Cheers, Lorenzo
© 2016 - 2026 Red Hat, Inc.