From: Mukesh Rathor <mrathor@linux.microsoft.com>
VFIO no longer puts the mmio pfn in vma->vm_pgoff. So, remove the code
that uses it to map mmio space. It is broken and will cause a panic.
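
For context, a minimal sketch (not the driver's exact code) of the
assumption the removed code relied on: older VFIO rewrote vm_pgoff on
the VM_PFNMAP vma backing the user mapping to hold the BAR's start pfn,
so a consumer could recover the physical address straight from the vma.

#include <linux/mm.h>

static unsigned long mmio_pfn_from_vma(struct vm_area_struct *vma)
{
	/* only meaningful for pfn-mapped I/O vmas */
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return 0;

	/*
	 * Broken on current kernels: vm_pgoff now keeps the original
	 * mmap() offset encoding (a VFIO region cookie), not a
	 * physical pfn. Handing it to the hypervisor maps an
	 * unrelated physical range into the guest, hence the panic.
	 */
	return vma->vm_pgoff;
}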
Signed-off-by: Mukesh Rathor <mrathor@linux.microsoft.com>
---
drivers/hv/mshv_root_main.c | 20 ++++----------------
1 file changed, 4 insertions(+), 16 deletions(-)
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 27313419828d..03f3aa9f5541 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -1258,16 +1258,8 @@ static int mshv_prepare_pinned_region(struct mshv_mem_region *region)
}
/*
- * This maps two things: guest RAM and for pci passthru mmio space.
- *
- * mmio:
- * - vfio overloads vm_pgoff to store the mmio start pfn/spa.
- * - Two things need to happen for mapping mmio range:
- * 1. mapped in the uaddr so VMM can access it.
- * 2. mapped in the hwpt (gfn <-> mmio phys addr) so guest can access it.
- *
- * This function takes care of the second. The first one is managed by vfio,
- * and hence is taken care of via vfio_pci_mmap_fault().
+ * This is called for both user ram and mmio space. The mmio space is not
+ * mapped here, but later during intercept.
*/
static long
mshv_map_user_memory(struct mshv_partition *partition,
@@ -1276,7 +1268,6 @@ mshv_map_user_memory(struct mshv_partition *partition,
struct mshv_mem_region *region;
struct vm_area_struct *vma;
bool is_mmio;
- ulong mmio_pfn;
long ret;
if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
@@ -1286,7 +1277,6 @@ mshv_map_user_memory(struct mshv_partition *partition,
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, mem.userspace_addr);
is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : 0;
- mmio_pfn = is_mmio ? vma->vm_pgoff : 0;
mmap_read_unlock(current->mm);
if (!vma)
@@ -1313,10 +1303,8 @@ mshv_map_user_memory(struct mshv_partition *partition,
HV_MAP_GPA_NO_ACCESS, NULL);
break;
case MSHV_REGION_TYPE_MMIO:
- ret = hv_call_map_mmio_pages(partition->pt_id,
- region->start_gfn,
- mmio_pfn,
- region->nr_pages);
+ /* mmio mappings are handled later during intercepts */
+ ret = 0;
break;
}
--
2.51.2.vfs.0.1
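
A hypothetical sketch of the replacement approach: the real intercept
handler is added in patch 15 of this series, which is not shown in this
thread. The idea is to resolve the host pfn at fault time, from the vma
that currently backs the VMM mapping, rather than trusting a pfn
captured from vm_pgoff at region-creation time. The helper
mshv_resolve_user_pfn() and the region->start_uaddr field are assumed
names for illustration only.

static long mshv_map_mmio_on_intercept(struct mshv_partition *partition,
				       struct mshv_mem_region *region,
				       u64 fault_gfn)
{
	unsigned long pfn;
	/* assumed: region->start_uaddr holds the registered user address */
	unsigned long uaddr = region->start_uaddr +
			      ((fault_gfn - region->start_gfn) << PAGE_SHIFT);
	long ret;

	/* hypothetical helper: walk the page tables for the pfn behind uaddr */
	ret = mshv_resolve_user_pfn(current->mm, uaddr, &pfn);
	if (ret)
		return ret;

	/* same hypercall the removed code used, now fed a valid pfn */
	return hv_call_map_mmio_pages(partition->pt_id, fault_gfn, pfn, 1);
}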
On 1/19/2026 10:42 PM, Mukesh R wrote:
> From: Mukesh Rathor <mrathor@linux.microsoft.com>
>
> VFIO no longer puts the mmio pfn in vma->vm_pgoff. So, remove the code
> that uses it to map mmio space. It is broken and will cause a panic.

What is the reason for having this as a separate commit from patch 15?
It seems like removing this code and adding the mmio intercept
handling could be done in one patch.

> Signed-off-by: Mukesh Rathor <mrathor@linux.microsoft.com>
> ---
> drivers/hv/mshv_root_main.c | 20 ++++----------------
> 1 file changed, 4 insertions(+), 16 deletions(-)

[snip]
On 1/23/26 10:34, Nuno Das Neves wrote:
> On 1/19/2026 10:42 PM, Mukesh R wrote:
>> From: Mukesh Rathor <mrathor@linux.microsoft.com>
>>
>> VFIO no longer puts the mmio pfn in vma->vm_pgoff. So, remove the code
>> that uses it to map mmio space. It is broken and will cause a panic.
>
> What is the reason for having this as a separate commit from patch 15?
> It seems like removing this code and adding the mmio intercept
> handling could be done in one patch.

Just ease of review, and of porting patches from this branch to that
branch, from that release to this release... I am sure someone would
have asked for this to be a separate patch :).

Thanks,
-Mukesh

[snip]
On Mon, Jan 19, 2026 at 10:42:29PM -0800, Mukesh R wrote:
> From: Mukesh Rathor <mrathor@linux.microsoft.com>
>
> VFIO no longer puts the mmio pfn in vma->vm_pgoff. So, remove the code
> that uses it to map mmio space. It is broken and will cause a panic.

[snip]

> case MSHV_REGION_TYPE_MMIO:
> -		ret = hv_call_map_mmio_pages(partition->pt_id,
> -					     region->start_gfn,
> -					     mmio_pfn,
> -					     region->nr_pages);
> +		/* mmio mappings are handled later during intercepts */
> +		ret = 0;

No need to update ret here: it is already 0 after the previous call.

Thanks,
Stanislav

> break;
> }
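
If ret is indeed already 0 on that path, as Stanislav reads it, the
hunk could shrink to just:

	case MSHV_REGION_TYPE_MMIO:
		/* mmio mappings are handled later during intercepts */
		break;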