When a static domain populates memory through populate_physmap at runtime,
it shall retrieve reserved pages from resv_page_list, to ensure that guest
RAM remains confined to the statically configured memory regions.
This commit also introduces a new helper, acquire_reserved_page, for that
purpose.
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v6 changes:
- drop the lock before returning
---
v5 changes:
- extract common code for assigning pages into a helper, assign_domstatic_pages
- refine commit message
- remove stub function acquire_reserved_page
- alloc/free of memory can happen concurrently, so access to resv_page_list
needs to be protected with a spinlock
---
v4 changes:
- miss dropping __init in acquire_domstatic_pages
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
---
v3 changes:
- move is_domain_using_staticmem to the common header file
- remove the #ifdef CONFIG_STATIC_MEMORY guards
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
xen/common/memory.c | 23 ++++++++++++++
xen/common/page_alloc.c | 70 +++++++++++++++++++++++++++++++----------
xen/include/xen/mm.h | 1 +
3 files changed, 77 insertions(+), 17 deletions(-)
diff --git a/xen/common/memory.c b/xen/common/memory.c
index f2d009843a..cb330ce877 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@ static void populate_physmap(struct memop_args *a)
mfn = _mfn(gpfn);
}
+ else if ( is_domain_using_staticmem(d) )
+ {
+ /*
+ * No easy way to guarantee the retrieved pages are contiguous,
+ * so forbid non-zero-order requests here.
+ */
+ if ( a->extent_order != 0 )
+ {
+ gdprintk(XENLOG_WARNING,
+ "Cannot allocate static order-%u pages for static %pd\n",
+ a->extent_order, d);
+ goto out;
+ }
+
+ mfn = acquire_reserved_page(d, a->memflags);
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ {
+ gdprintk(XENLOG_WARNING,
+ "%pd: failed to retrieve a reserved page\n",
+ d);
+ goto out;
+ }
+ }
else
{
page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9004dd41c1..57d28304df 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2661,9 +2661,8 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
}
}
-static bool __init prepare_staticmem_pages(struct page_info *pg,
- unsigned long nr_mfns,
- unsigned int memflags)
+static bool prepare_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+ unsigned int memflags)
{
bool need_tlbflush = false;
uint32_t tlbflush_timestamp = 0;
@@ -2744,21 +2743,9 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
return pg;
}
-/*
- * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
- * then assign them to one specific domain #d.
- */
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
- unsigned int nr_mfns, unsigned int memflags)
+static int assign_domstatic_pages(struct domain *d, struct page_info *pg,
+ unsigned int nr_mfns, unsigned int memflags)
{
- struct page_info *pg;
-
- ASSERT(!in_irq());
-
- pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
- if ( !pg )
- return -ENOENT;
-
if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
{
/*
@@ -2777,6 +2764,55 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
return 0;
}
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+ unsigned int nr_mfns, unsigned int memflags)
+{
+ struct page_info *pg;
+
+ ASSERT(!in_irq());
+
+ pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+ if ( !pg )
+ return -ENOENT;
+
+ if ( assign_domstatic_pages(d, pg, nr_mfns, memflags) )
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Acquire a page from the reserved page list (resv_page_list) when populating
+ * memory for a static domain at runtime.
+ */
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+ struct page_info *page;
+
+ spin_lock(&d->page_alloc_lock);
+ /* Acquire a page from the reserved page list (resv_page_list). */
+ page = page_list_remove_head(&d->resv_page_list);
+ spin_unlock(&d->page_alloc_lock);
+ if ( unlikely(!page) )
+ return INVALID_MFN;
+
+ if ( !prepare_staticmem_pages(page, 1, memflags) )
+ goto fail;
+
+ if ( assign_domstatic_pages(d, page, 1, memflags) )
+ goto fail;
+
+ return page_to_mfn(page);
+
+ fail:
+ page_list_add_tail(page, &d->resv_page_list);
+ return INVALID_MFN;
+}
#endif
/*
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index e80b4bdcde..e100151e50 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -90,6 +90,7 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
bool need_scrub);
int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
unsigned int memflags);
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
/* Map machine page range in Xen virtual address space. */
int map_pages_to_xen(
--
2.25.1
On 07.06.2022 09:30, Penny Zheng wrote:
> +/*
> + * Acquire a page from the reserved page list (resv_page_list) when populating
> + * memory for a static domain at runtime.
> + */
> +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> +{
> + struct page_info *page;
> +
> + spin_lock(&d->page_alloc_lock);
> + /* Acquire a page from the reserved page list (resv_page_list). */
> + page = page_list_remove_head(&d->resv_page_list);
> + spin_unlock(&d->page_alloc_lock);
With page removal done under lock, ...
> + if ( unlikely(!page) )
> + return INVALID_MFN;
> +
> + if ( !prepare_staticmem_pages(page, 1, memflags) )
> + goto fail;
> +
> + if ( assign_domstatic_pages(d, page, 1, memflags) )
> + goto fail;
> +
> + return page_to_mfn(page);
> +
> + fail:
> + page_list_add_tail(page, &d->resv_page_list);
> + return INVALID_MFN;
... doesn't re-adding the page to the list also need to be done
with the lock held?
Jan
Hi Jan,
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Tuesday, June 7, 2022 3:58 PM
> To: Penny Zheng <Penny.Zheng@arm.com>
> Cc: Wei Chen <Wei.Chen@arm.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; George Dunlap <george.dunlap@citrix.com>;
> Julien Grall <julien@xen.org>; Stefano Stabellini <sstabellini@kernel.org>; Wei
> Liu <wl@xen.org>; xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v6 9/9] xen: retrieve reserved pages on populate_physmap
>
> On 07.06.2022 09:30, Penny Zheng wrote:
> > +/*
> > + * Acquire a page from the reserved page list (resv_page_list) when populating
> > + * memory for a static domain at runtime.
> > + */
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > + struct page_info *page;
> > +
> > + spin_lock(&d->page_alloc_lock);
> > + /* Acquire a page from the reserved page list (resv_page_list). */
> > + page = page_list_remove_head(&d->resv_page_list);
> > + spin_unlock(&d->page_alloc_lock);
>
> With page removal done under lock, ...
>
> > + if ( unlikely(!page) )
> > + return INVALID_MFN;
> > +
> > + if ( !prepare_staticmem_pages(page, 1, memflags) )
> > + goto fail;
> > +
> > + if ( assign_domstatic_pages(d, page, 1, memflags) )
> > + goto fail;
> > +
> > + return page_to_mfn(page);
> > +
> > + fail:
> > + page_list_add_tail(page, &d->resv_page_list);
> > + return INVALID_MFN;
>
> ... doesn't re-adding the page to the list also need to be done with the lock
> held?
True, sorry about that.
As I said in another thread with Julien, I'll add the missing half:
"
For the freeing part, I shall take the lock in arch_free_heap_page(),
where we insert the page into resv_page_list, and release it at the end of
free_staticmem_pages().
"
>
> Jan