From: Julien Grall <julien.grall@arm.com>
Now that map_pages_to_xen() has been extended to support 2MB mappings,
we can replace the call to create_mappings() with a call to map_pages_to_xen().

This has the advantage of removing the differences between the 32-bit and
64-bit code.

Lastly, remove create_mappings() as there are no more callers.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
---
Changes in v4:
- Add missing _PAGE_BLOCK
Changes in v3:
- Fix typo in the commit message
- Remove the TODO regarding contiguous bit
Changes in v2:
- New patch
---
xen/arch/arm/mm.c | 64 +++++------------------------------------------
1 file changed, 6 insertions(+), 58 deletions(-)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 65af44f42232..be37176a4725 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -369,40 +369,6 @@ void clear_fixmap(unsigned map)
BUG_ON(res != 0);
}
-/* Create Xen's mappings of memory.
- * Mapping_size must be either 2MB or 32MB.
- * Base and virt must be mapping_size aligned.
- * Size must be a multiple of mapping_size.
- * second must be a contiguous set of second level page tables
- * covering the region starting at virt_offset. */
-static void __init create_mappings(lpae_t *second,
- unsigned long virt_offset,
- unsigned long base_mfn,
- unsigned long nr_mfns,
- unsigned int mapping_size)
-{
- unsigned long i, count;
- const unsigned long granularity = mapping_size >> PAGE_SHIFT;
- lpae_t pte, *p;
-
- ASSERT((mapping_size == MB(2)) || (mapping_size == MB(32)));
- ASSERT(!((virt_offset >> PAGE_SHIFT) % granularity));
- ASSERT(!(base_mfn % granularity));
- ASSERT(!(nr_mfns % granularity));
-
- count = nr_mfns / XEN_PT_LPAE_ENTRIES;
- p = second + second_linear_offset(virt_offset);
- pte = mfn_to_xen_entry(_mfn(base_mfn), MT_NORMAL);
- if ( granularity == 16 * XEN_PT_LPAE_ENTRIES )
- pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */
- for ( i = 0; i < count; i++ )
- {
- write_pte(p + i, pte);
- pte.pt.base += 1 << XEN_PT_LPAE_SHIFT;
- }
- flush_xen_tlb_local();
-}
-
#ifdef CONFIG_DOMAIN_PAGE
void *map_domain_page_global(mfn_t mfn)
{
@@ -862,36 +828,18 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
mfn_t base_mfn;
const unsigned long mapping_size = frametable_size < MB(32) ? MB(2) : MB(32);
-#ifdef CONFIG_ARM_64
- lpae_t *second, pte;
- unsigned long nr_second;
- mfn_t second_base;
- int i;
-#endif
+ int rc;
frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
/* Round up to 2M or 32M boundary, as appropriate. */
frametable_size = ROUNDUP(frametable_size, mapping_size);
base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12));
-#ifdef CONFIG_ARM_64
- /* Compute the number of second level pages. */
- nr_second = ROUNDUP(frametable_size, FIRST_SIZE) >> FIRST_SHIFT;
- second_base = alloc_boot_pages(nr_second, 1);
- second = mfn_to_virt(second_base);
- for ( i = 0; i < nr_second; i++ )
- {
- clear_page(mfn_to_virt(mfn_add(second_base, i)));
- pte = mfn_to_xen_entry(mfn_add(second_base, i), MT_NORMAL);
- pte.pt.table = 1;
- write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte);
- }
- create_mappings(second, 0, mfn_x(base_mfn), frametable_size >> PAGE_SHIFT,
- mapping_size);
-#else
- create_mappings(xen_second, FRAMETABLE_VIRT_START, mfn_x(base_mfn),
- frametable_size >> PAGE_SHIFT, mapping_size);
-#endif
+ rc = map_pages_to_xen(FRAMETABLE_VIRT_START, base_mfn,
+ frametable_size >> PAGE_SHIFT,
+ PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+ if ( rc )
+ panic("Unable to setup the frametable mappings.\n");
memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
memset(&frame_table[nr_pdxs], -1,
--
2.32.0
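
For reference, the mapping path of setup_frametable_mappings() reads as follows once the patch is applied. This is a consolidated sketch of the hunk above, not the complete function: the nr_pdxs computation and the trailing memset() calls are elided.

    unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
    mfn_t base_mfn;
    const unsigned long mapping_size = frametable_size < MB(32) ? MB(2) : MB(32);
    int rc;

    frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
    /* Round up to a 2MB or 32MB boundary, as appropriate. */
    frametable_size = ROUNDUP(frametable_size, mapping_size);
    base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32 << (20 - 12));

    /*
     * A single, arch-independent call now serves both arm32 and arm64:
     * _PAGE_BLOCK lets map_pages_to_xen() use superpage (block) mappings
     * where alignment and size permit, replacing the hand-rolled
     * second-level table setup that create_mappings() required on arm64.
     */
    rc = map_pages_to_xen(FRAMETABLE_VIRT_START, base_mfn,
                          frametable_size >> PAGE_SHIFT,
                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
    if ( rc )
        panic("Unable to setup the frametable mappings.\n");
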
On Fri, 20 May 2022, Julien Grall wrote:
> From: Julien Grall <julien.grall@arm.com>
>
> Now that map_pages_to_xen() has been extended to support 2MB mappings,
> we can replace the call to create_mappings() with a call to map_pages_to_xen().
>
> This has the advantage of removing the differences between the 32-bit and
> 64-bit code.
>
> Lastly, remove create_mappings() as there are no more callers.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
> ---
> Changes in v4:
> - Add missing _PAGE_BLOCK
>
> Changes in v3:
> - Fix typo in the commit message
> - Remove the TODO regarding contiguous bit
>
> Changes in v2:
> - New patch
> ---
> xen/arch/arm/mm.c | 64 +++++------------------------------------------
> 1 file changed, 6 insertions(+), 58 deletions(-)
>
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 65af44f42232..be37176a4725 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -369,40 +369,6 @@ void clear_fixmap(unsigned map)
> BUG_ON(res != 0);
> }
>
> -/* Create Xen's mappings of memory.
> - * Mapping_size must be either 2MB or 32MB.
> - * Base and virt must be mapping_size aligned.
> - * Size must be a multiple of mapping_size.
> - * second must be a contiguous set of second level page tables
> - * covering the region starting at virt_offset. */
> -static void __init create_mappings(lpae_t *second,
> - unsigned long virt_offset,
> - unsigned long base_mfn,
> - unsigned long nr_mfns,
> - unsigned int mapping_size)
> -{
> - unsigned long i, count;
> - const unsigned long granularity = mapping_size >> PAGE_SHIFT;
> - lpae_t pte, *p;
> -
> - ASSERT((mapping_size == MB(2)) || (mapping_size == MB(32)));
> - ASSERT(!((virt_offset >> PAGE_SHIFT) % granularity));
> - ASSERT(!(base_mfn % granularity));
> - ASSERT(!(nr_mfns % granularity));
> -
> - count = nr_mfns / XEN_PT_LPAE_ENTRIES;
> - p = second + second_linear_offset(virt_offset);
> - pte = mfn_to_xen_entry(_mfn(base_mfn), MT_NORMAL);
> - if ( granularity == 16 * XEN_PT_LPAE_ENTRIES )
> - pte.pt.contig = 1; /* These maps are in 16-entry contiguous chunks. */
> - for ( i = 0; i < count; i++ )
> - {
> - write_pte(p + i, pte);
> - pte.pt.base += 1 << XEN_PT_LPAE_SHIFT;
> - }
> - flush_xen_tlb_local();
> -}
> -
> #ifdef CONFIG_DOMAIN_PAGE
> void *map_domain_page_global(mfn_t mfn)
> {
> @@ -862,36 +828,18 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
> unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
> mfn_t base_mfn;
> const unsigned long mapping_size = frametable_size < MB(32) ? MB(2) : MB(32);
> -#ifdef CONFIG_ARM_64
> - lpae_t *second, pte;
> - unsigned long nr_second;
> - mfn_t second_base;
> - int i;
> -#endif
> + int rc;
>
> frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
> /* Round up to 2M or 32M boundary, as appropriate. */
> frametable_size = ROUNDUP(frametable_size, mapping_size);
> base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 32<<(20-12));
>
> -#ifdef CONFIG_ARM_64
> - /* Compute the number of second level pages. */
> - nr_second = ROUNDUP(frametable_size, FIRST_SIZE) >> FIRST_SHIFT;
> - second_base = alloc_boot_pages(nr_second, 1);
> - second = mfn_to_virt(second_base);
> - for ( i = 0; i < nr_second; i++ )
> - {
> - clear_page(mfn_to_virt(mfn_add(second_base, i)));
> - pte = mfn_to_xen_entry(mfn_add(second_base, i), MT_NORMAL);
> - pte.pt.table = 1;
> - write_pte(&xen_first[first_table_offset(FRAMETABLE_VIRT_START)+i], pte);
> - }
> - create_mappings(second, 0, mfn_x(base_mfn), frametable_size >> PAGE_SHIFT,
> - mapping_size);
> -#else
> - create_mappings(xen_second, FRAMETABLE_VIRT_START, mfn_x(base_mfn),
> - frametable_size >> PAGE_SHIFT, mapping_size);
> -#endif
> + rc = map_pages_to_xen(FRAMETABLE_VIRT_START, base_mfn,
> + frametable_size >> PAGE_SHIFT,
> + PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
> + if ( rc )
> + panic("Unable to setup the frametable mappings.\n");
>
> memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
> memset(&frame_table[nr_pdxs], -1,
> --
> 2.32.0
>
> On 20 May 2022, at 13:09, Julien Grall <julien@xen.org> wrote:
>
> From: Julien Grall <julien.grall@arm.com>
>
> Now that map_pages_to_xen() has been extended to support 2MB mappings,
> we can replace the call to create_mappings() with a call to map_pages_to_xen().
>
> This has the advantage of removing the differences between the 32-bit and
> 64-bit code.
>
> Lastly, remove create_mappings() as there are no more callers.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Signed-off-by: Julien Grall <jgrall@amazon.com>

Hi Julien,

Reviewed-by: Luca Fancellu <luca.fancellu@arm.com>

I’ve also tested all patches, including this one, on arm64: booting Xen+Dom0, starting a few guests, connecting consoles, destroying them, and doing networking; no problems so far.

Tested-by: Luca Fancellu <luca.fancellu@arm.com>