From: Hongyan Xia <hongyxia@amazon.com>
When we do not have a direct map, arch_mfns_in_directmap() will always
return false, thus init_node_heap() will allocate xenheap pages from an
existing node for the metadata of a new node. This means that the
metadata of a new node is in a different node, slowing down heap
allocation.
Since we now have early vmap, vmap the metadata locally in the new node.
Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Elias El Yandouzi <eliasely@amazon.com>
---
Changes in v2:
* vmap_contig_pages() was renamed to vmap_contig()
* Fix indentation and coding style
Changes from Hongyan's version:
* arch_mfn_in_direct_map() was renamed to
arch_mfns_in_directmap()
* Use vmap_contig_pages() rather than __vmap(...).
* Add missing include (xen/vmap.h) so it compiles on Arm
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index dfb2c05322..3c0909f333 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -136,6 +136,7 @@
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/spinlock.h>
+#include <xen/vmap.h>
#include <asm/flushtlb.h>
#include <asm/page.h>
@@ -605,22 +606,44 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
needed = 0;
}
else if ( *use_tail && nr >= needed &&
- arch_mfns_in_directmap(mfn + nr - needed, needed) &&
(!xenheap_bits ||
- !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
+ !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
- _heap[node] = mfn_to_virt(mfn + nr - needed);
- avail[node] = mfn_to_virt(mfn + nr - 1) +
- PAGE_SIZE - sizeof(**avail) * NR_ZONES;
+ if ( arch_mfns_in_directmap(mfn + nr - needed, needed) )
+ {
+ _heap[node] = mfn_to_virt(mfn + nr - needed);
+ avail[node] = mfn_to_virt(mfn + nr - 1) +
+ PAGE_SIZE - sizeof(**avail) * NR_ZONES;
+ }
+ else
+ {
+ mfn_t needed_start = _mfn(mfn + nr - needed);
+
+ _heap[node] = vmap_contig(needed_start, needed);
+ BUG_ON(!_heap[node]);
+ avail[node] = (void *)(_heap[node]) + (needed << PAGE_SHIFT) -
+ sizeof(**avail) * NR_ZONES;
+ }
}
else if ( nr >= needed &&
- arch_mfns_in_directmap(mfn, needed) &&
(!xenheap_bits ||
- !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
+ !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
{
- _heap[node] = mfn_to_virt(mfn);
- avail[node] = mfn_to_virt(mfn + needed - 1) +
- PAGE_SIZE - sizeof(**avail) * NR_ZONES;
+ if ( arch_mfns_in_directmap(mfn, needed) )
+ {
+ _heap[node] = mfn_to_virt(mfn);
+ avail[node] = mfn_to_virt(mfn + needed - 1) +
+ PAGE_SIZE - sizeof(**avail) * NR_ZONES;
+ }
+ else
+ {
+ mfn_t needed_start = _mfn(mfn);
+
+ _heap[node] = vmap_contig(needed_start, needed);
+ BUG_ON(!_heap[node]);
+ avail[node] = (void *)(_heap[node]) + (needed << PAGE_SHIFT) -
+ sizeof(**avail) * NR_ZONES;
+ }
*use_tail = false;
}
else if ( get_order_from_bytes(sizeof(**_heap)) ==
--
2.40.1