From: Hongyan Xia <hongyxia@amazon.com>
When there is no always-mapped direct map, xenheap allocations need
to be mapped and unmapped on demand.
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Elias El Yandouzi <eliasely@amazon.com>
---
I have left the calls to map_pages_to_xen() and destroy_xen_mappings()
in the split-heap version for now. I am not entirely convinced they are
necessary, because in that setup only the xenheap would be always mapped
and it doesn't contain any guest memory (aside from the grant table), so
mapping/unmapping on every allocation seems unnecessary.
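To ease review, the new non-directmap path in the common (non-split-heap)
allocator boils down to the following (condensed from the hunks below; the
split-heap variant additionally sets/clears PGC_xen_heap and frees with
free_domheap_pages() on the error path):

    virt_addr = page_to_virt(pg);

    /* Without an always-mapped direct map, create the mapping explicitly. */
    if ( !has_directmap() &&
         map_pages_to_xen((unsigned long)virt_addr, page_to_mfn(pg),
                          1UL << order, PAGE_HYPERVISOR) )
    {
        /* Failed to map xenheap pages. */
        free_heap_pages(pg, order, false);
        return NULL;
    }

    return virt_addr;

free_xenheap_pages() then undoes this with destroy_xen_mappings() on the
page-aligned range before handing the pages back to the heap.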
Changes in v4:
* Call printk() instead of dprintk()
Changes in v2:
* Fix remaining wrong indentation in alloc_xenheap_pages()
Changes since Hongyan's version:
* Rebase
* Fix indentation in alloc_xenheap_pages()
* Fix build for arm32
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b0be246780b7..2cef521ad85a 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2243,6 +2243,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe)
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pg;
+ void *virt_addr;
ASSERT_ALLOC_CONTEXT();
@@ -2251,17 +2252,36 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
if ( unlikely(pg == NULL) )
return NULL;
- return page_to_virt(pg);
+ virt_addr = page_to_virt(pg);
+
+ if ( !has_directmap() &&
+ map_pages_to_xen((unsigned long)virt_addr, page_to_mfn(pg), 1UL << order,
+ PAGE_HYPERVISOR) )
+ {
+ /* Failed to map xenheap pages. */
+ free_heap_pages(pg, order, false);
+ return NULL;
+ }
+
+ return virt_addr;
}
void free_xenheap_pages(void *v, unsigned int order)
{
+ unsigned long va = (unsigned long)v & PAGE_MASK;
+
ASSERT_ALLOC_CONTEXT();
if ( v == NULL )
return;
+ if ( !has_directmap() &&
+ destroy_xen_mappings(va, va + (PAGE_SIZE << order)) )
+ printk(XENLOG_WARNING
+ "Error while destroying xenheap mappings at %p, order %u\n",
+ v, order);
+
free_heap_pages(virt_to_page(v), order, false);
}
@@ -2285,6 +2305,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
{
struct page_info *pg;
unsigned int i;
+ void *virt_addr;
ASSERT_ALLOC_CONTEXT();
@@ -2297,16 +2318,28 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
if ( unlikely(pg == NULL) )
return NULL;
+ virt_addr = page_to_virt(pg);
+
+ if ( !has_directmap() &&
+ map_pages_to_xen((unsigned long)virt_addr, page_to_mfn(pg), 1UL << order,
+ PAGE_HYPERVISOR) )
+ {
+ /* Failed to map xenheap pages. */
+ free_domheap_pages(pg, order);
+ return NULL;
+ }
+
for ( i = 0; i < (1u << order); i++ )
pg[i].count_info |= PGC_xen_heap;
- return page_to_virt(pg);
+ return virt_addr;
}
void free_xenheap_pages(void *v, unsigned int order)
{
struct page_info *pg;
unsigned int i;
+ unsigned long va = (unsigned long)v & PAGE_MASK;
ASSERT_ALLOC_CONTEXT();
@@ -2318,6 +2351,12 @@ void free_xenheap_pages(void *v, unsigned int order)
for ( i = 0; i < (1u << order); i++ )
pg[i].count_info &= ~PGC_xen_heap;
+ if ( !has_directmap() &&
+ destroy_xen_mappings(va, va + (PAGE_SIZE << order)) )
+ printk(XENLOG_WARNING
+ "Error while destroying xenheap mappings at %p, order %u\n",
+ v, order);
+
free_heap_pages(pg, order, true);
}
--
2.40.1