Introduce page_set_xenheap_gfn() to encode the GFN associated with a Xen heap
page directly into the type_info field of struct page_info.
Introduce page_get_xenheap_gfn() to retrieve the GFN from a Xen heap page.
Reserve the upper 10 bits of type_info for the usage counter and frame type;
use the remaining lower bits to store the grant table frame GFN.
This is sufficient for all supported RISC-V MMU modes: Sv32 uses 22-bit GFNs,
while Sv39, Sv48, and Sv57 use up to 44-bit GFNs.
Define PGT_gfn_mask and PGT_gfn_width to ensure a consistent bit layout
across all RISC-V MMU modes, avoiding the need for mode-specific ifdefs.
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in v3:
- Update the comment above definitions of PGT_gfn_width, PGT_gfn_mask.
- Add page_get_xenheap_gfn().
- Make commit message clearer.
---
Changes in v2:
- These changes were part of "xen/riscv: implement p2m mapping functionality".
No additional changes were done.
---
xen/arch/riscv/include/asm/mm.h | 43 ++++++++++++++++++++++++++++++---
1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index dd8cdc9782..7950d132c1 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -12,6 +12,7 @@
#include <xen/sections.h>
#include <xen/types.h>
+#include <asm/cmpxchg.h>
#include <asm/page.h>
#include <asm/page-bits.h>
@@ -247,9 +248,17 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
#define PGT_writable_page PG_mask(1, 1) /* has writable mappings? */
#define PGT_type_mask PG_mask(1, 1) /* Bits 31 or 63. */
-/* Count of uses of this frame as its current type. */
-#define PGT_count_width PG_shift(2)
-#define PGT_count_mask ((1UL << PGT_count_width) - 1)
+ /* 9-bit count of uses of this frame as its current type. FIXME: 0x3FF below is a 10-bit value and would overlap PGT_type_mask; should this be 0x1FF? */
+#define PGT_count_mask PG_mask(0x3FF, 10)
+
+/*
+ * Stored in bits [21:0] (Sv32) or [53:0] (Sv39/48/57) GFN if page is
+ * a xenheap page.
+ */
+#define PGT_gfn_width PG_shift(10)
+#define PGT_gfn_mask (BIT(PGT_gfn_width, UL) - 1)
+
+#define PGT_INVALID_XENHEAP_GFN _gfn(PGT_gfn_mask)
/*
* Page needs to be scrubbed. Since this bit can only be set on a page that is
@@ -301,6 +310,34 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
#define PFN_ORDER(pg) ((pg)->v.free.order)
+/*
+ * All accesses to the GFN portion of the type_info field should be
+ * protected by the P2M lock. Where that is not feasible (risk of
+ * deadlock, lock inversion, etc.), it is important to ensure that all
+ * unprotected updates to this field are atomic.
+ */
+static inline gfn_t page_get_xenheap_gfn(const struct page_info *p)
+{
+ gfn_t gfn = _gfn(ACCESS_ONCE(p->u.inuse.type_info) & PGT_gfn_mask);
+
+ ASSERT(is_xen_heap_page(p));
+
+ return gfn_eq(gfn, PGT_INVALID_XENHEAP_GFN) ? INVALID_GFN : gfn;
+}
+
+static inline void page_set_xenheap_gfn(struct page_info *p, gfn_t gfn)
+{
+ gfn_t gfn_ = gfn_eq(gfn, INVALID_GFN) ? PGT_INVALID_XENHEAP_GFN : gfn;
+ unsigned long x, nx, y = p->u.inuse.type_info;
+
+ ASSERT(is_xen_heap_page(p));
+
+ do {
+ x = y;
+ nx = (x & ~PGT_gfn_mask) | gfn_x(gfn_);
+ } while ( (y = cmpxchg(&p->u.inuse.type_info, x, nx)) != x );
+}
+
extern unsigned char cpu0_boot_stack[];
void setup_initial_pagetables(void);
--
2.50.1