From nobody Mon Feb 9 03:13:06 2026 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) client-ip=192.237.175.120; envelope-from=xen-devel-bounces@lists.xenproject.org; helo=lists.xenproject.org; Authentication-Results: mx.zohomail.com; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=fail(p=none dis=none) header.from=arm.com Return-Path: Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) by mx.zohomail.com with SMTPS id 1662539851578282.5700064390762; Wed, 7 Sep 2022 01:37:31 -0700 (PDT) Received: from list by lists.xenproject.org with outflank-mailman.400947.642629 (Exim 4.92) (envelope-from ) id 1oVqYb-0003yX-CM; Wed, 07 Sep 2022 08:37:13 +0000 Received: by outflank-mailman (output) from mailman id 400947.642629; Wed, 07 Sep 2022 08:37:13 +0000 Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1oVqYb-0003yM-8p; Wed, 07 Sep 2022 08:37:13 +0000 Received: by outflank-mailman (input) for mailman id 400947; Wed, 07 Sep 2022 08:37:11 +0000 Received: from se1-gles-sth1-in.inumbo.com ([159.253.27.254] helo=se1-gles-sth1.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1oVqYZ-0002lD-O9 for xen-devel@lists.xenproject.org; Wed, 07 Sep 2022 08:37:11 +0000 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by se1-gles-sth1.inumbo.com (Halon) with ESMTP id 43a27fa8-2e88-11ed-a016-b9edf5238543; Wed, 07 Sep 2022 10:37:10 +0200 (CEST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 24808106F; Wed, 7 Sep 2022 01:37:16 -0700 (PDT) Received: from entos-skylake.shanghai.arm.com (entos-skylake.shanghai.arm.com [10.169.212.207]) by 
usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id D6E3A3F534; Wed, 7 Sep 2022 01:37:35 -0700 (PDT) X-Outflank-Mailman: Message body and most headers restored to incoming version X-BeenThere: xen-devel@lists.xenproject.org List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Errors-To: xen-devel-bounces@lists.xenproject.org Precedence: list Sender: "Xen-devel" X-Inumbo-ID: 43a27fa8-2e88-11ed-a016-b9edf5238543 From: Henry Wang To: xen-devel@lists.xenproject.org Cc: Henry Wang , Stefano Stabellini , Julien Grall , Bertrand Marquis , Wei Chen , Volodymyr Babchuk Subject: [PATCH v3 3/4] xen/arm: mm: Rename xenheap_* variable to directmap_* Date: Wed, 7 Sep 2022 08:36:42 +0000 Message-Id: <20220907083643.20152-4-Henry.Wang@arm.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20220907083643.20152-1-Henry.Wang@arm.com> References: <20220907083643.20152-1-Henry.Wang@arm.com> X-ZM-MESSAGEID: 1662539853147100007 Content-Transfer-Encoding: quoted-printable MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" With the static heap setup, keeping xenheap_* in the function setup_xenheap_mappings() will make the code confusing to read, because we always need to map the full RAM on Arm64. Therefore, rename all "xenheap_*" variables to "directmap_*" to make clear that the area is used to access the RAM easily. On Arm32, only the xenheap is direct mapped today. So the renaming to "directmap_*" would still be valid for Arm32. No functional change is intended. Signed-off-by: Henry Wang --- Changes from v2 to v3: - Adjust the order of this patch, make it #3. Changes from v1 to v2: - New commit. 
--- xen/arch/arm/include/asm/config.h | 2 +- xen/arch/arm/include/asm/mm.h | 22 +++++++++++----------- xen/arch/arm/mm.c | 24 ++++++++++++------------ xen/arch/arm/setup.c | 27 ++++++++++++++------------- 4 files changed, 38 insertions(+), 37 deletions(-) diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/c= onfig.h index 2fafb9f228..0fefed1b8a 100644 --- a/xen/arch/arm/include/asm/config.h +++ b/xen/arch/arm/include/asm/config.h @@ -160,7 +160,7 @@ #define DIRECTMAP_SIZE (SLOT0_ENTRY_SIZE * (265-256)) #define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1) =20 -#define XENHEAP_VIRT_START xenheap_virt_start +#define XENHEAP_VIRT_START directmap_virt_start =20 #define HYPERVISOR_VIRT_END DIRECTMAP_VIRT_END =20 diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h index 749fbefa0c..7b4f6ce233 100644 --- a/xen/arch/arm/include/asm/mm.h +++ b/xen/arch/arm/include/asm/mm.h @@ -154,19 +154,19 @@ struct page_info #define _PGC_need_scrub _PGC_allocated #define PGC_need_scrub PGC_allocated =20 -extern mfn_t xenheap_mfn_start, xenheap_mfn_end; -extern vaddr_t xenheap_virt_end; +extern mfn_t directmap_mfn_start, directmap_mfn_end; +extern vaddr_t directmap_virt_end; #ifdef CONFIG_ARM_64 -extern vaddr_t xenheap_virt_start; -extern unsigned long xenheap_base_pdx; +extern vaddr_t directmap_virt_start; +extern unsigned long directmap_base_pdx; #endif =20 #ifdef CONFIG_ARM_32 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page)) #define is_xen_heap_mfn(mfn) ({ \ unsigned long mfn_ =3D mfn_x(mfn); \ - (mfn_ >=3D mfn_x(xenheap_mfn_start) && \ - mfn_ < mfn_x(xenheap_mfn_end)); \ + (mfn_ >=3D mfn_x(directmap_mfn_start) && \ + mfn_ < mfn_x(directmap_mfn_end)); \ }) #else #define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap) @@ -267,16 +267,16 @@ static inline paddr_t __virt_to_maddr(vaddr_t va) static inline void *maddr_to_virt(paddr_t ma) { ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma))); - ma -=3D 
mfn_to_maddr(xenheap_mfn_start); + ma -=3D mfn_to_maddr(directmap_mfn_start); return (void *)(unsigned long) ma + XENHEAP_VIRT_START; } #else static inline void *maddr_to_virt(paddr_t ma) { - ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) < + ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) < (DIRECTMAP_SIZE >> PAGE_SHIFT)); return (void *)(XENHEAP_VIRT_START - - (xenheap_base_pdx << PAGE_SHIFT) + + (directmap_base_pdx << PAGE_SHIFT) + ((ma & ma_va_bottom_mask) | ((ma & ma_top_mask) >> pfn_pdx_hole_shift))); } @@ -319,10 +319,10 @@ static inline struct page_info *virt_to_page(const vo= id *v) unsigned long pdx; =20 ASSERT(va >=3D XENHEAP_VIRT_START); - ASSERT(va < xenheap_virt_end); + ASSERT(va < directmap_virt_end); =20 pdx =3D (va - XENHEAP_VIRT_START) >> PAGE_SHIFT; - pdx +=3D mfn_to_pdx(xenheap_mfn_start); + pdx +=3D mfn_to_pdx(directmap_mfn_start); return frame_table + pdx - frametable_base_pdx; } =20 diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 7f5b317d3e..4a70ed2986 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -132,12 +132,12 @@ uint64_t init_ttbr; static paddr_t phys_offset; =20 /* Limits of the Xen heap */ -mfn_t xenheap_mfn_start __read_mostly =3D INVALID_MFN_INITIALIZER; -mfn_t xenheap_mfn_end __read_mostly; -vaddr_t xenheap_virt_end __read_mostly; +mfn_t directmap_mfn_start __read_mostly =3D INVALID_MFN_INITIALIZER; +mfn_t directmap_mfn_end __read_mostly; +vaddr_t directmap_virt_end __read_mostly; #ifdef CONFIG_ARM_64 -vaddr_t xenheap_virt_start __read_mostly; -unsigned long xenheap_base_pdx __read_mostly; +vaddr_t directmap_virt_start __read_mostly; +unsigned long directmap_base_pdx __read_mostly; #endif =20 unsigned long frametable_base_pdx __read_mostly; @@ -609,7 +609,7 @@ void __init setup_xenheap_mappings(unsigned long base_m= fn, panic("Unable to setup the xenheap mappings.\n"); =20 /* Record where the xenheap is, for translation routines. 
*/ - xenheap_virt_end =3D XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE; + directmap_virt_end =3D XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE; } #else /* CONFIG_ARM_64 */ void __init setup_xenheap_mappings(unsigned long base_mfn, @@ -618,12 +618,12 @@ void __init setup_xenheap_mappings(unsigned long base= _mfn, int rc; =20 /* First call sets the xenheap physical and virtual offset. */ - if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) ) + if ( mfn_eq(directmap_mfn_start, INVALID_MFN) ) { unsigned long mfn_gb =3D base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) -= 1); =20 - xenheap_mfn_start =3D _mfn(base_mfn); - xenheap_base_pdx =3D mfn_to_pdx(_mfn(base_mfn)); + directmap_mfn_start =3D _mfn(base_mfn); + directmap_base_pdx =3D mfn_to_pdx(_mfn(base_mfn)); /* * The base address may not be aligned to the first level * size (e.g. 1GB when using 4KB pages). This would prevent @@ -633,13 +633,13 @@ void __init setup_xenheap_mappings(unsigned long base= _mfn, * Prevent that by offsetting the start of the xenheap virtual * address. 
*/ - xenheap_virt_start =3D DIRECTMAP_VIRT_START + + directmap_virt_start =3D DIRECTMAP_VIRT_START + (base_mfn - mfn_gb) * PAGE_SIZE; } =20 - if ( base_mfn < mfn_x(xenheap_mfn_start) ) + if ( base_mfn < mfn_x(directmap_mfn_start) ) panic("cannot add xenheap mapping at %lx below heap start %lx\n", - base_mfn, mfn_x(xenheap_mfn_start)); + base_mfn, mfn_x(directmap_mfn_start)); =20 rc =3D map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn), _mfn(base_mfn), nr_mfns, diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index 3c36c050bf..4a8334c268 100644 --- a/xen/arch/arm/setup.c +++ b/xen/arch/arm/setup.c @@ -697,11 +697,11 @@ static void __init populate_boot_allocator(void) =20 #ifdef CONFIG_ARM_32 /* Avoid the xenheap */ - if ( s < mfn_to_maddr(xenheap_mfn_end) && - mfn_to_maddr(xenheap_mfn_start) < e ) + if ( s < mfn_to_maddr(directmap_mfn_end) && + mfn_to_maddr(directmap_mfn_start) < e ) { - e =3D mfn_to_maddr(xenheap_mfn_start); - n =3D mfn_to_maddr(xenheap_mfn_end); + e =3D mfn_to_maddr(directmap_mfn_start); + n =3D mfn_to_maddr(directmap_mfn_end); } #endif =20 @@ -793,15 +793,16 @@ static void __init setup_mm(void) * We need some memory to allocate the page-tables used for the * xenheap mappings. So populate the boot allocator first. * - * This requires us to set xenheap_mfn_{start, end} first so the Xenhe= ap + * Note that currently xenheap is direct mapped on Arm32. + * This requires us to set directmap_mfn_{start, end} first so the Xen= heap * region can be avoided. 
*/ - xenheap_mfn_start =3D _mfn((e >> PAGE_SHIFT) - xenheap_pages); - xenheap_mfn_end =3D mfn_add(xenheap_mfn_start, xenheap_pages); + directmap_mfn_start =3D _mfn((e >> PAGE_SHIFT) - xenheap_pages); + directmap_mfn_end =3D mfn_add(directmap_mfn_start, xenheap_pages); =20 populate_boot_allocator(); =20 - setup_xenheap_mappings(mfn_x(xenheap_mfn_start), xenheap_pages); + setup_xenheap_mappings(mfn_x(directmap_mfn_start), xenheap_pages); =20 /* Frame table covers all of RAM region, including holes */ setup_frametable_mappings(ram_start, ram_end); @@ -816,8 +817,8 @@ static void __init setup_mm(void) smp_processor_id()); =20 /* Add xenheap memory that was not already added to the boot allocator= . */ - init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start), - mfn_to_maddr(xenheap_mfn_end)); + init_xenheap_pages(mfn_to_maddr(directmap_mfn_start), + mfn_to_maddr(directmap_mfn_end)); =20 init_staticmem_pages(); } @@ -858,9 +859,9 @@ static void __init setup_mm(void) =20 total_pages +=3D ram_size >> PAGE_SHIFT; =20 - xenheap_virt_end =3D XENHEAP_VIRT_START + ram_end - ram_start; - xenheap_mfn_start =3D maddr_to_mfn(ram_start); - xenheap_mfn_end =3D maddr_to_mfn(ram_end); + directmap_virt_end =3D XENHEAP_VIRT_START + ram_end - ram_start; + directmap_mfn_start =3D maddr_to_mfn(ram_start); + directmap_mfn_end =3D maddr_to_mfn(ram_end); =20 setup_frametable_mappings(ram_start, ram_end); max_page =3D PFN_DOWN(ram_end); --=20 2.17.1