Introduce the following things:
- Update the p2m_domain structure, which describes per-p2m-table state, with:
  - a lock to protect updates to the p2m.
  - a pool of pages used to construct the p2m.
  - a back pointer to the domain structure.
- p2m_init() to initialize the members introduced in the p2m_domain structure
  (expected usage is sketched below).
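
The call site for p2m_init() is not part of this patch. Purely as an
illustration, and assuming (as on other architectures) that it ends up being
invoked from the domain-creation path, the wiring could look roughly like the
sketch below; the arch_domain_create() body shown here is hypothetical:

  /* Hypothetical call site, not introduced by this patch. */
  int arch_domain_create(struct domain *d,
                         struct xen_domctl_createdomain *config,
                         unsigned int flags)
  {
      int rc;

      /* ... other arch-specific initialisation ... */

      /* Set up the per-domain p2m state: lock, page list and backpointer. */
      rc = p2m_init(d);
      if ( rc )
          return rc;

      return 0;
  }

Note that p2m_init() only covers the "trivial" initialisation; root page
table allocation is introduced separately in "xen/riscv: add root page table
allocation".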
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
Changes in V5:
- Acked-by: Jan Beulich <jbeulich@suse.com>.
---
Changes in V4:
- Move the introduction of the clean_pte member of the p2m_domain structure
to the patch where it starts to be used:
xen/riscv: add root page table allocation
- Add prototype of p2m_init() to asm/p2m.h.
---
Changes in V3:
- s/p2m_type/p2m_types.
- Drop the init of p2m->clean_pte in p2m_init() as CONFIG_HAS_PASSTHROUGH is
going to be selected unconditionally, and CONFIG_HAS_PASSTHROUGH isn't
ready to be used for RISC-V yet.
Add a compilation error so that the init of p2m->clean_pte isn't forgotten.
- Move the definition of p2m->domain up in p2m_init().
- Add iommu_use_hap_pt() when p2m->clean_pte is initialized.
- Add the comment above p2m_types member of p2m_domain struct.
- Add need_flush member to p2m_domain structure.
- Move the introduction of p2m_write_(un)lock() and p2m_tlb_flush_sync()
to the patch where they are actually used:
xen/riscv: implement guest_physmap_add_entry() for mapping GFNs to MFN
(a sketch of the intended locking/flush pattern is included at the end of
the change notes below).
- Add p2m member to arch_domain structure.
- Drop p2m_types from struct p2m_domain as P2M type for PTE will be stored
differently.
- Drop default_access as it isn't going to be used for now.
- Move the definition of p2m_is_write_locked() to "implement function to map
memory in guest p2m" where it is actually used.
---
Changes in V2:
- Use the earlier introduced sbi_remote_hfence_gvma_vmid() for a proper
implementation of p2m_force_tlb_flush_sync(), as TLB flushing needs to happen
for each pCPU which has potentially cached a mapping, which is tracked by
d->dirty_cpumask.
- Drop unnecessary blanks.
- Fix code style for the '#' of pre-processor directives.
- Drop max_mapped_gfn and lowest_mapped_gfn as they aren't used now.
- [p2m_init()] Set p2m->clean_pte=false if CONFIG_HAS_PASSTHROUGH=n.
- [p2m_init()] Update the comment above p2m->domain = d;
- Drop p2m->need_flush as it seems to always be true for RISC-V, and as a
consequence drop p2m_tlb_flush_sync().
- Move to separate patch an introduction of root page table allocation.
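
Note (not part of the patch): the comment above need_flush in the newly
added struct p2m_domain (see the diff below) describes deferring TLB flushes
until the p2m write lock is released. The actual p2m_write_(un)lock() and
p2m_tlb_flush_sync() helpers only land in "xen/riscv: implement
guest_physmap_add_entry() for mapping GFNs to MFN"; the bodies below are an
assumption, sketching the intended pattern only:

  /* Hypothetical helper bodies; the real ones are added by a later patch. */
  static inline void p2m_write_lock(struct p2m_domain *p2m)
  {
      write_lock(&p2m->lock);
  }

  static inline void p2m_write_unlock(struct p2m_domain *p2m)
  {
      /*
       * Perform any deferred flush while the write lock is still held, so
       * that stale translations cannot be observed once it is released.
       */
      if ( p2m->need_flush )
          p2m_tlb_flush_sync(p2m);

      write_unlock(&p2m->lock);
  }

An immediate flush (e.g. when a superpage is shattered) would instead call
p2m_tlb_flush_sync() directly, as noted in the struct comment.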
---
xen/arch/riscv/include/asm/domain.h | 5 +++++
xen/arch/riscv/include/asm/p2m.h | 33 +++++++++++++++++++++++++++++
xen/arch/riscv/p2m.c | 20 +++++++++++++++++
3 files changed, 58 insertions(+)
diff --git a/xen/arch/riscv/include/asm/domain.h b/xen/arch/riscv/include/asm/domain.h
index aac1040658..e688980efa 100644
--- a/xen/arch/riscv/include/asm/domain.h
+++ b/xen/arch/riscv/include/asm/domain.h
@@ -5,6 +5,8 @@
#include <xen/xmalloc.h>
#include <public/hvm/params.h>
+#include <asm/p2m.h>
+
struct vcpu_vmid {
uint64_t generation;
uint16_t vmid;
@@ -24,6 +26,9 @@ struct arch_vcpu {
struct arch_domain {
struct hvm_domain hvm;
+
+ /* Virtual MMU */
+ struct p2m_domain p2m;
};
#include <xen/sched.h>
diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index 3a5066f360..a129ed8392 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -3,6 +3,9 @@
#define ASM__RISCV__P2M_H
#include <xen/errno.h>
+#include <xen/mm.h>
+#include <xen/rwlock.h>
+#include <xen/types.h>
#include <asm/page-bits.h>
@@ -10,6 +13,34 @@ extern unsigned char gstage_mode;
#define paddr_bits PADDR_BITS
+/* Get host p2m table */
+#define p2m_get_hostp2m(d) (&(d)->arch.p2m)
+
+/* Per-p2m-table state */
+struct p2m_domain {
+ /*
+ * Lock that protects updates to the p2m.
+ */
+ rwlock_t lock;
+
+ /* Pages used to construct the p2m */
+ struct page_list_head pages;
+
+ /* Back pointer to domain */
+ struct domain *domain;
+
+ /*
+ * P2M updates may require TLBs to be flushed (invalidated).
+ *
+ * Flushes may be deferred by setting 'need_flush' and then flushing
+ * when the p2m write lock is released.
+ *
+ * If an immediate flush is required (e.g., if a super page is
+ * shattered), call p2m_tlb_flush_sync().
+ */
+ bool need_flush;
+};
+
/*
* List of possible type for each page in the p2m entry.
* The number of available bit per page in the pte for this purpose is 2 bits.
@@ -92,6 +123,8 @@ static inline bool arch_acquire_resource_check(struct domain *d)
void pre_gstage_init(void);
+int p2m_init(struct domain *d);
+
#endif /* ASM__RISCV__P2M_H */
/*
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index d8027a270f..1b5fc7ffff 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -3,6 +3,10 @@
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/macros.h>
+#include <xen/mm.h>
+#include <xen/paging.h>
+#include <xen/rwlock.h>
+#include <xen/sched.h>
#include <xen/sections.h>
#include <asm/csr.h>
@@ -97,3 +101,19 @@ void __init pre_gstage_init(void)
vmid_init();
}
+
+int p2m_init(struct domain *d)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+ /*
+ * "Trivial" initialisation is now complete. Set the backpointer so the
+ * users of p2m could get an access to domain structure.
+ */
+ p2m->domain = d;
+
+ rwlock_init(&p2m->lock);
+ INIT_PAGE_LIST_HEAD(&p2m->pages);
+
+ return 0;
+}
--
2.51.0