This patch introduces static memory initialization during system boot-up.

The new function init_staticmem_pages is responsible for static memory
initialization.

Helper free_staticmem_pages is the equivalent of free_heap_pages; it frees
nr_mfns pages of static memory.

This commit also introduces the new CONFIG_STATIC_MEMORY option to avoid
bringing dead code into other architectures.

Asynchronous scrubbing of static memory pages is left as a TODO.

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v4 change:
- move the option CONFIG_STATIC_MEMORY to common code, and with Arm
"select"ing it
- replace round_pg{down,up}() with PFN_DOWN()/PFN_UP()
---
xen/arch/arm/Kconfig | 1 +
xen/arch/arm/setup.c | 24 ++++++++++++++++++++++++
xen/common/Kconfig | 3 +++
xen/common/page_alloc.c | 20 ++++++++++++++++++++
xen/include/xen/mm.h | 6 ++++++
5 files changed, 54 insertions(+)
diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
index ecfa6822e4..cc7a943d27 100644
--- a/xen/arch/arm/Kconfig
+++ b/xen/arch/arm/Kconfig
@@ -15,6 +15,7 @@ config ARM
select HAS_PASSTHROUGH
select HAS_PDX
select IOMMU_FORCE_PT_SHARE
+ select STATIC_MEMORY
config ARCH_DEFCONFIG
string
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index f569134317..369f6631ee 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -622,6 +622,26 @@ static void __init init_pdx(void)
}
}
+/* Static memory initialization */
+static void __init init_staticmem_pages(void)
+{
+ unsigned int bank;
+
+ /* TODO: Considering NUMA-support scenario. */
+ for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
+ {
+ unsigned long bank_start = PFN_UP(bootinfo.static_mem.bank[bank].start);
+ unsigned long bank_size = PFN_DOWN(bootinfo.static_mem.bank[bank].size);
+ unsigned long bank_end = bank_start + bank_size;
+
+ if ( bank_end <= bank_start )
+ return;
+
+ free_staticmem_pages(mfn_to_page(_mfn(bank_start)),
+ bank_size, false);
+ }
+}
+
#ifdef CONFIG_ARM_32
static void __init setup_mm(void)
{
@@ -749,6 +769,8 @@ static void __init setup_mm(void)
/* Add xenheap memory that was not already added to the boot allocator. */
init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
mfn_to_maddr(xenheap_mfn_end));
+
+ init_staticmem_pages();
}
#else /* CONFIG_ARM_64 */
static void __init setup_mm(void)
@@ -802,6 +824,8 @@ static void __init setup_mm(void)
setup_frametable_mappings(ram_start, ram_end);
max_page = PFN_DOWN(ram_end);
+
+ init_staticmem_pages();
}
#endif
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 0ddd18e11a..8f736eea82 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -67,6 +67,9 @@ config MEM_ACCESS
config NEEDS_LIBELF
bool
+config STATIC_MEMORY
+ bool
+
menu "Speculative hardening"
config SPECULATIVE_HARDEN_ARRAY
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a3ee5eca9e..2acb73e323 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1519,6 +1519,26 @@ static void free_heap_pages(
spin_unlock(&heap_lock);
}
+#ifdef CONFIG_STATIC_MEMORY
+/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
+void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ bool need_scrub)
+{
+ mfn_t mfn = page_to_mfn(pg);
+ unsigned long i;
+
+ for ( i = 0; i < nr_mfns; i++ )
+ {
+ mark_page_free(&pg[i], mfn_add(mfn, i));
+
+ if ( need_scrub )
+ {
+ /* TODO: asynchronous scrubbing for pages of static memory. */
+ scrub_one_page(pg);
+ }
+ }
+}
+#endif
/*
* Following rules applied for page offline:
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 667f9dac83..8e8fb5a615 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -85,6 +85,12 @@ bool scrub_free_pages(void);
} while ( false )
#define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
+#ifdef CONFIG_STATIC_MEMORY
+/* These functions are for static memory */
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ bool need_scrub);
+#endif
+
/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(
unsigned long virt,
--
2.25.1
On 28.07.2021 12:27, Penny Zheng wrote:
> This patch introduces static memory initialization during system boot-up.
>
> The new function init_staticmem_pages is responsible for static memory
> initialization.
>
> Helper free_staticmem_pages is the equivalent of free_heap_pages; it frees
> nr_mfns pages of static memory.
>
> This commit also introduces the new CONFIG_STATIC_MEMORY option to avoid
> bringing dead code into other architectures.
>
> Asynchronous scrubbing of static memory pages is left as a TODO.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>

Common code parts:
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan
Hi Penny,
On 28/07/2021 11:27, Penny Zheng wrote:
> This patch introduces static memory initialization during system boot-up.
>
> The new function init_staticmem_pages is responsible for static memory
> initialization.
>
> Helper free_staticmem_pages is the equivalent of free_heap_pages; it frees
> nr_mfns pages of static memory.
>
> This commit also introduces the new CONFIG_STATIC_MEMORY option to avoid
> bringing dead code into other architectures.
>
> Asynchronous scrubbing of static memory pages is left as a TODO.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> ---
> v4 change:
> - move the option CONFIG_STATIC_MEMORY to common code, and with Arm
> "select"ing it
> - replace round_pg{down,up}() with PFN_DOWN()/PFN_UP()
> ---
> xen/arch/arm/Kconfig | 1 +
> xen/arch/arm/setup.c | 24 ++++++++++++++++++++++++
> xen/common/Kconfig | 3 +++
> xen/common/page_alloc.c | 20 ++++++++++++++++++++
> xen/include/xen/mm.h | 6 ++++++
> 5 files changed, 54 insertions(+)
>
> diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig
> index ecfa6822e4..cc7a943d27 100644
> --- a/xen/arch/arm/Kconfig
> +++ b/xen/arch/arm/Kconfig
> @@ -15,6 +15,7 @@ config ARM
> select HAS_PASSTHROUGH
> select HAS_PDX
> select IOMMU_FORCE_PT_SHARE
> + select STATIC_MEMORY
Given the list of TODOs, I think it would be better if STATIC_MEMORY is
user selectable and gated by UNSUPPORTED.
We can remove the dependency on UNSUPPORTED once everything has been addressed.
>
> config ARCH_DEFCONFIG
> string
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index f569134317..369f6631ee 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -622,6 +622,26 @@ static void __init init_pdx(void)
> }
> }
>
> +/* Static memory initialization */
> +static void __init init_staticmem_pages(void)
> +{
> + unsigned int bank;
> +
> + /* TODO: Considering NUMA-support scenario. */
> + for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
> + {
> + unsigned long bank_start = PFN_UP(bootinfo.static_mem.bank[bank].start);
I would prefer if bank_start is a mfn_t.
> + unsigned long bank_size = PFN_DOWN(bootinfo.static_mem.bank[bank].size);
NIT: I would suggest naming it bank_pages or bank_nr_pages. This would
make it clear to the user that this contains pages.
> + unsigned long bank_end = bank_start + bank_size;
mfn_t please.
> +
> + if ( bank_end <= bank_start )
This will mean you will need to use mfn_x() for both. This code would be
less nice but at least it avoids mixing address and MFN.
> + return;
> +
> + free_staticmem_pages(mfn_to_page(_mfn(bank_start)),
> + bank_size, false);
> + }
> +}
> +
> #ifdef CONFIG_ARM_32
> static void __init setup_mm(void)
> {
> @@ -749,6 +769,8 @@ static void __init setup_mm(void)
> /* Add xenheap memory that was not already added to the boot allocator. */
> init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
> mfn_to_maddr(xenheap_mfn_end));
> +
> + init_staticmem_pages();
> }
> #else /* CONFIG_ARM_64 */
> static void __init setup_mm(void)
> @@ -802,6 +824,8 @@ static void __init setup_mm(void)
>
> setup_frametable_mappings(ram_start, ram_end);
> max_page = PFN_DOWN(ram_end);
> +
> + init_staticmem_pages();
> }
> #endif
>
> diff --git a/xen/common/Kconfig b/xen/common/Kconfig
> index 0ddd18e11a..8f736eea82 100644
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -67,6 +67,9 @@ config MEM_ACCESS
> config NEEDS_LIBELF
> bool
>
> +config STATIC_MEMORY
> + bool
> +
> menu "Speculative hardening"
>
> config SPECULATIVE_HARDEN_ARRAY
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index a3ee5eca9e..2acb73e323 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1519,6 +1519,26 @@ static void free_heap_pages(
> spin_unlock(&heap_lock);
> }
>
> +#ifdef CONFIG_STATIC_MEMORY
> +/* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
> +void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> + bool need_scrub)
> +{
> + mfn_t mfn = page_to_mfn(pg);
> + unsigned long i;
> +
> + for ( i = 0; i < nr_mfns; i++ )
> + {
> + mark_page_free(&pg[i], mfn_add(mfn, i));
> +
> + if ( need_scrub )
> + {
> + /* TODO: asynchronous scrubbing for pages of static memory. */
> + scrub_one_page(pg);
> + }
> + }
> +}
> +#endif
>
> /*
> * Following rules applied for page offline:
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 667f9dac83..8e8fb5a615 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -85,6 +85,12 @@ bool scrub_free_pages(void);
> } while ( false )
> #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
>
> +#ifdef CONFIG_STATIC_MEMORY
> +/* These functions are for static memory */
> +void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> + bool need_scrub);
> +#endif
> +
> /* Map machine page range in Xen virtual address space. */
> int map_pages_to_xen(
> unsigned long virt,
>
Cheers,
--
Julien Grall
Hi Julien
> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: Friday, August 13, 2021 8:21 PM
> To: Penny Zheng <Penny.Zheng@arm.com>; xen-devel@lists.xenproject.org;
> sstabellini@kernel.org
> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>; Wei Chen
> <Wei.Chen@arm.com>; nd <nd@arm.com>
> Subject: Re: [PATCH V4 05/10] xen/arm: static memory initialization
>
> Hi Penny,
>
> On 28/07/2021 11:27, Penny Zheng wrote:
> > This patch introduces static memory initialization during system boot-up.
> >
> > The new function init_staticmem_pages is responsible for static memory
> > initialization.
> >
> > Helper free_staticmem_pages is the equivalent of free_heap_pages; it
> > frees nr_mfns pages of static memory.
> >
> > This commit also introduces the new CONFIG_STATIC_MEMORY option to avoid
> > bringing dead code into other architectures.
> >
> > Asynchronous scrubbing of static memory pages is left as a TODO.
> >
> > Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> > ---
> > v4 change:
> > - move the option CONFIG_STATIC_MEMORY to common code, and with
> Arm
> > "select"ing it
> > - replace round_pg{down,up}() with PFN_DOWN()/PFN_UP()
> > ---
> > xen/arch/arm/Kconfig | 1 +
> > xen/arch/arm/setup.c | 24 ++++++++++++++++++++++++
> > xen/common/Kconfig | 3 +++
> > xen/common/page_alloc.c | 20 ++++++++++++++++++++
> > xen/include/xen/mm.h | 6 ++++++
> > 5 files changed, 54 insertions(+)
> >
> > diff --git a/xen/arch/arm/Kconfig b/xen/arch/arm/Kconfig index
> > ecfa6822e4..cc7a943d27 100644
> > --- a/xen/arch/arm/Kconfig
> > +++ b/xen/arch/arm/Kconfig
> > @@ -15,6 +15,7 @@ config ARM
> > select HAS_PASSTHROUGH
> > select HAS_PDX
> > select IOMMU_FORCE_PT_SHARE
> > + select STATIC_MEMORY
>
> Given the list of TODOs, I think it would be better if STATIC_MEMORY is user
> selectable and gated by UNSUPPORTED.
>
> We can remove the dependency on UNSUPPORTED once everything has been
> addressed.
>
Sure. I'll change it to be gated by UNSUPPORTED:
config STATIC_ALLOCATION
bool "Static Allocation Support (UNSUPPORTED)" if UNSUPPORTED
> >
> > config ARCH_DEFCONFIG
> > string
> > diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c index
> > f569134317..369f6631ee 100644
> > --- a/xen/arch/arm/setup.c
> > +++ b/xen/arch/arm/setup.c
> > @@ -622,6 +622,26 @@ static void __init init_pdx(void)
> > }
> > }
> >
> > +/* Static memory initialization */
> > +static void __init init_staticmem_pages(void) {
> > + unsigned int bank;
> > +
> > + /* TODO: Considering NUMA-support scenario. */
> > + for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
> > + {
> > + unsigned long bank_start =
> > + PFN_UP(bootinfo.static_mem.bank[bank].start);
>
> I would prefer if bank_start is a mfn_t.
>
Sure, it's more appropriate.
> > + unsigned long bank_size =
> > + PFN_DOWN(bootinfo.static_mem.bank[bank].size);
>
> NIT: I would suggest naming it bank_pages or bank_nr_pages. This would
> make it clear to the user that this contains pages.
>
Sure.
> > + unsigned long bank_end = bank_start + bank_size;
>
> mfn_t please.
>
Sure.
> > +
> > + if ( bank_end <= bank_start )
>
> This will mean you will need to use mfn_x() for both. This code would be less
> nice but at least it avoids mixing address and MFN.
>
Sure.
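Putting those suggestions together, the loop would then look roughly like this
(untested sketch, using the bank_pages naming suggested above):

    for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
    {
        /* Use mfn_t for the bank boundaries so MFNs and addresses are not mixed. */
        mfn_t bank_start = _mfn(PFN_UP(bootinfo.static_mem.bank[bank].start));
        unsigned long bank_pages = PFN_DOWN(bootinfo.static_mem.bank[bank].size);
        mfn_t bank_end = mfn_add(bank_start, bank_pages);

        if ( mfn_x(bank_end) <= mfn_x(bank_start) )
            return;

        free_staticmem_pages(mfn_to_page(bank_start), bank_pages, false);
    }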
> > + return;
> > +
> > + free_staticmem_pages(mfn_to_page(_mfn(bank_start)),
> > + bank_size, false);
> > + }
> > +}
> > +
> > #ifdef CONFIG_ARM_32
> > static void __init setup_mm(void)
> > {
> > @@ -749,6 +769,8 @@ static void __init setup_mm(void)
> > /* Add xenheap memory that was not already added to the boot allocator.
> */
> > init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
> > mfn_to_maddr(xenheap_mfn_end));
> > +
> > + init_staticmem_pages();
> > }
> > #else /* CONFIG_ARM_64 */
> > static void __init setup_mm(void)
> > @@ -802,6 +824,8 @@ static void __init setup_mm(void)
> >
> > setup_frametable_mappings(ram_start, ram_end);
> > max_page = PFN_DOWN(ram_end);
> > +
> > + init_staticmem_pages();
> > }
> > #endif
> >
> > diff --git a/xen/common/Kconfig b/xen/common/Kconfig index
> > 0ddd18e11a..8f736eea82 100644
> > --- a/xen/common/Kconfig
> > +++ b/xen/common/Kconfig
> > @@ -67,6 +67,9 @@ config MEM_ACCESS
> > config NEEDS_LIBELF
> > bool
> >
> > +config STATIC_MEMORY
> > + bool
> > +
> > menu "Speculative hardening"
> >
> > config SPECULATIVE_HARDEN_ARRAY
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > a3ee5eca9e..2acb73e323 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -1519,6 +1519,26 @@ static void free_heap_pages(
> > spin_unlock(&heap_lock);
> > }
> >
> > +#ifdef CONFIG_STATIC_MEMORY
> > +/* Equivalent of free_heap_pages to free nr_mfns pages of static
> > +memory. */ void __init free_staticmem_pages(struct page_info *pg,
> unsigned long nr_mfns,
> > + bool need_scrub) {
> > + mfn_t mfn = page_to_mfn(pg);
> > + unsigned long i;
> > +
> > + for ( i = 0; i < nr_mfns; i++ )
> > + {
> > + mark_page_free(&pg[i], mfn_add(mfn, i));
> > +
> > + if ( need_scrub )
> > + {
> > + /* TODO: asynchronous scrubbing for pages of static memory. */
> > + scrub_one_page(pg);
> > + }
> > + }
> > +}
> > +#endif
> >
> > /*
> > * Following rules applied for page offline:
> > diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h index
> > 667f9dac83..8e8fb5a615 100644
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -85,6 +85,12 @@ bool scrub_free_pages(void);
> > } while ( false )
> > #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
> >
> > +#ifdef CONFIG_STATIC_MEMORY
> > +/* These functions are for static memory */ void
> > +free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> > + bool need_scrub); #endif
> > +
> > /* Map machine page range in Xen virtual address space. */
> > int map_pages_to_xen(
> > unsigned long virt,
> >
>
> Cheers,
>
> --
> Julien Grall

Cheers,
Penny
Hi Penny,
On 28/07/2021 11:27, Penny Zheng wrote:
> +/* Static memory initialization */
> +static void __init init_staticmem_pages(void)
> +{
> + unsigned int bank;
> +
> + /* TODO: Considering NUMA-support scenario. */
I forgot to ask about this. What do you expect to be different with NUMA?
> + for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
> + {
> + unsigned long bank_start = PFN_UP(bootinfo.static_mem.bank[bank].start);
> + unsigned long bank_size = PFN_DOWN(bootinfo.static_mem.bank[bank].size);
> + unsigned long bank_end = bank_start + bank_size;
> +
> + if ( bank_end <= bank_start )
> + return;
> +
> + free_staticmem_pages(mfn_to_page(_mfn(bank_start)),
> + bank_size, false);
> + }
> +}
> +
Cheers,
--
Julien Grall
Hi Julien,
> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: Friday, August 13, 2021 8:38 PM
> To: Penny Zheng <Penny.Zheng@arm.com>; xen-devel@lists.xenproject.org;
> sstabellini@kernel.org
> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>; Wei Chen
> <Wei.Chen@arm.com>; nd <nd@arm.com>
> Subject: Re: [PATCH V4 05/10] xen/arm: static memory initialization
>
> Hi Penny,
>
> On 28/07/2021 11:27, Penny Zheng wrote:
> > +/* Static memory initialization */
> > +static void __init init_staticmem_pages(void)
> > +{
> > + unsigned int bank;
> > +
> > + /* TODO: Considering NUMA-support scenario. */
>
> I forgot to ask about this. What do you expect to be different with NUMA?
>
Based on our current NUMA implementation, I think there is no difference
between NUMA and non-NUMA systems for static allocation. Maybe in the
future we will add some checks to warn the user about cross-node
configurations. But for now, I think it's better for Penny to remove this
comment.
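For illustration only, such a check might eventually take a shape like the
following; paddr_to_nid() is a hypothetical helper used purely for the sake
of the example, since the Arm port has no NUMA support yet:

/*
 * Illustrative sketch only: warn when a static-memory bank spans more than
 * one NUMA node. paddr_to_nid() is hypothetical; no such helper exists in
 * the Arm port today.
 */
static void __init warn_staticmem_cross_node(paddr_t start, paddr_t size)
{
    nodeid_t first = paddr_to_nid(start);
    nodeid_t last = paddr_to_nid(start + size - 1);

    if ( first != last )
        printk(XENLOG_WARNING
               "Static memory bank [%"PRIpaddr", %"PRIpaddr") spans nodes %u and %u\n",
               start, start + size, (unsigned int)first, (unsigned int)last);
}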
> > + for ( bank = 0 ; bank < bootinfo.static_mem.nr_banks; bank++ )
> > + {
> > + unsigned long bank_start =
> PFN_UP(bootinfo.static_mem.bank[bank].start);
> > + unsigned long bank_size =
> PFN_DOWN(bootinfo.static_mem.bank[bank].size);
> > + unsigned long bank_end = bank_start + bank_size;
> > +
> > + if ( bank_end <= bank_start )
> > + return;
> > +
> > + free_staticmem_pages(mfn_to_page(_mfn(bank_start)),
> > + bank_size, false);
> > + }
> > +}
> > +
>
> Cheers,
>
> --
> Julien Grall