xen/arch/riscv/include/asm/domain.h | 2 + xen/arch/riscv/include/asm/p2m.h | 4 ++ xen/arch/riscv/p2m.c | 100 ++++++++++++++++++++++++++++ xen/arch/riscv/traps.c | 8 +++ 4 files changed, 114 insertions(+)
Introduce helpers to manage VS-stage and G-stage translation state during
vCPU context switches.
As VSATP and HGATP cannot be updated atomically, clear VSATP on context
switch-out to prevent speculative VS-stage translations from being associated
with an incorrect VMID. On context switch-in, restore HGATP and VSATP in the
required order.
Add p2m_handle_vmenter() to perform VMID management and issue TLB flushes
only when required (e.g. on VMID reuse or generation change).
This provides the necessary infrastructure for correct p2m context switching
on RISC-V.
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in v4:
- in p2m_handle_vmenter():
- Drop call of flush_tlb_guest_local() as it isn't necessary.
- use 'unsigned short' for old_vmid and new_vmid.
- Rename local variable c to curr.
- Update the comments inside the function.
- Update the comment above p2m_ctxt_switch_from() as we don't care about
the order while *_ctxt_switch_from happens as CSR_* registers aren't being
updated and only stored. (Only CSR_VSATP is set to 0 to prevent speculation
when *_ctxt_switch_to() functions will be called) We only care that
p2m_ctxt_switch_from() is called before CSRs restoring is started.
- Update the comment inside p2m_ctxt_switch_from().
- p2m_ctxt_switch_to() updates:
- Update the comment above.
- Add call of flush_tlb_guest_local() after setting of VSATP to avoid usage
of stale translations for new vCPU.
- Rebase on top of staging, so check_for_pcpu_work() is introduced here
instead of vtimer patch series.
---
Changes in v3:
- Add comment above p2m_ctxt_switch_{to, from}().
- Code style fixes.
- Refactor p2m_ctxt_switch_to().
- Update the comment at the end of p2m_ctxt_switch_from().
- Refactor the code of p2m_handle_vmenter().
---
Changes in v2:
- Add vsatp field declaration to arch_vcpu.
- s/p2m_ctx_switch_{from,to}/p2m_ctxt_switch_{from,to}.
- Introduce p2m_handle_vmenter() for proper handling of VMID,
hgatp and vsatp updates.
- Introduce is_p2m_switch_finished and init it inside
p2m_ctx_switch_to() for further handling in p2m_handle_vmenter().
- Code style fixes.
- Add is_idle_vcpu() check in p2m_ctxt_switch_from().
- use csr_swap() in p2m_ctxt_switch_from().
- move flush_tlb_guest_local() to the end of p2m_handle_vmenter() and
drop comments that are no longer necessary.
- Correct printk()'s arguments in p2m_handle_vmenter().
---
xen/arch/riscv/include/asm/domain.h | 2 +
xen/arch/riscv/include/asm/p2m.h | 4 ++
xen/arch/riscv/p2m.c | 100 ++++++++++++++++++++++++++++
xen/arch/riscv/traps.c | 8 +++
4 files changed, 114 insertions(+)
diff --git a/xen/arch/riscv/include/asm/domain.h b/xen/arch/riscv/include/asm/domain.h
index f78f145258d6..5aec627a7adb 100644
--- a/xen/arch/riscv/include/asm/domain.h
+++ b/xen/arch/riscv/include/asm/domain.h
@@ -48,6 +48,8 @@ struct arch_vcpu {
} xen_saved_context;
struct cpu_info *cpu_info;
+
+ register_t vsatp;
};
struct paging_domain {
diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index f63b5dec99b1..60f27f9b347e 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -255,6 +255,10 @@ static inline bool p2m_is_locked(const struct p2m_domain *p2m)
struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
p2m_type_t *t);
+void p2m_ctxt_switch_from(struct vcpu *p);
+void p2m_ctxt_switch_to(struct vcpu *n);
+void p2m_handle_vmenter(void);
+
#endif /* ASM__RISCV__P2M_H */
/*
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index 0abeb374c110..89e5db606fc8 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -1434,3 +1434,103 @@ struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
return get_page(page, p2m->domain) ? page : NULL;
}
+
+/*
+ * Must be called before restoring CSRs to avoid potential speculation using
+ * an incorrect set of page tables during updates of VSATP and HGATP.
+ */
+void p2m_ctxt_switch_from(struct vcpu *p)
+{
+ if ( is_idle_vcpu(p) )
+ return;
+
+ /*
+ * No mechanism is provided to atomically change vsatp and hgatp
+ * together. Hence, to prevent speculative execution causing one
+ * guest’s VS-stage translations to be cached under another guest’s
+ * VMID, world-switch code should zero vsatp, then swap hgatp, then
+ * finally write the new vsatp value, which will be done in
+ * p2m_ctxt_switch_to().
+ * Note, that also HGATP update could happen in p2m_handle_vmenter().
+ */
+ p->arch.vsatp = csr_swap(CSR_VSATP, 0);
+
+ /*
+ * Nothing to do with HGATP as it will be updated in p2m_ctxt_switch_to()
+ * or/and in p2m_handle_vmenter().
+ */
+}
+
+/*
+ * As speculation may occur at any time, an incorrect set of page tables could
+ * be used. Therefore, this function must be called only after all other guest
+ * CSRs have been restored. Otherwise, VS-stage translations could be populated
+ * using stale control state (e.g. SEPC still referring to the previous guest)
+ * while VSATP and HGATP already point to the new guest.
+ */
+void p2m_ctxt_switch_to(struct vcpu *n)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(n->domain);
+
+ if ( is_idle_vcpu(n) )
+ return;
+
+ csr_write(CSR_HGATP, construct_hgatp(p2m, n->arch.vmid.vmid));
+ /*
+ * As the VMID is unique per vCPU and is just re-used here, there is no
+ * need for G-stage TLB flush here.
+ */
+
+ csr_write(CSR_VSATP, n->arch.vsatp);
+
+ /*
+ * Since n->arch.vsatp.ASID may equal p->arch.vsatp.ASID,
+ * flush the VS-stage TLB to prevent the new guest from
+ * using stale (not belonging to it) translations.
+ * ASID equality is not the only potential issue here.
+ *
+ * TODO: This could be optimized by making the flush
+ * conditional.
+ */
+ flush_tlb_guest_local();
+}
+
+void p2m_handle_vmenter(void)
+{
+ struct vcpu *curr = current;
+ struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);
+ struct vcpu_vmid *p_vmid = &curr->arch.vmid;
+ unsigned short old_vmid, new_vmid;
+ bool need_flush;
+
+ BUG_ON(is_idle_vcpu(curr));
+
+ old_vmid = p_vmid->vmid;
+ need_flush = vmid_handle_vmenter(p_vmid);
+ new_vmid = p_vmid->vmid;
+
+#ifdef P2M_DEBUG
+ printk("%pv: oldvmid(%d) new_vmid(%d), need_flush(%d)\n",
+ curr, old_vmid, new_vmid, need_flush);
+#endif
+
+ /*
+ * There is no need to set VSATP to 0 to stop speculation before updating
+ * HGATP, as VSATP is not modified here.
+ */
+ if ( old_vmid != new_vmid )
+ csr_write(CSR_HGATP, construct_hgatp(p2m, p_vmid->vmid));
+
+ /*
+ * There is also no need to flush G-stage TLB unconditionally as old VMID
+ * won't be reused until need_flush is set to true.
+ */
+ if ( unlikely(need_flush) )
+ local_hfence_gvma_all();
+
+ /*
+ * There is also no need to flush the VS-stage TLB: even if speculation
+ * occurs (VSATP + old HGATP were used), it will use the old VMID, which
+ * won't be reused until need_flush is set to true.
+ */
+}
diff --git a/xen/arch/riscv/traps.c b/xen/arch/riscv/traps.c
index c81a4f79a0d2..9fca941526f6 100644
--- a/xen/arch/riscv/traps.c
+++ b/xen/arch/riscv/traps.c
@@ -169,6 +169,11 @@ static void do_unexpected_trap(const struct cpu_user_regs *regs)
die();
}
+static void check_for_pcpu_work(void)
+{
+ p2m_handle_vmenter();
+}
+
void do_trap(struct cpu_user_regs *cpu_regs)
{
register_t pc = cpu_regs->sepc;
@@ -222,6 +227,9 @@ void do_trap(struct cpu_user_regs *cpu_regs)
do_unexpected_trap(cpu_regs);
break;
}
+
+ if ( cpu_regs->hstatus & HSTATUS_SPV )
+ check_for_pcpu_work();
}
void vcpu_show_execution_state(struct vcpu *v)
--
2.53.0
On 19.02.2026 13:40, Oleksii Kurochko wrote: > Introduce helpers to manage VS-stage and G-stage translation state during > vCPU context switches. > > As VSATP and HGATP cannot be updated atomically, clear VSATP on context > switch-out to prevent speculative VS-stage translations from being associated > with an incorrect VMID. On context switch-in, restore HGATP and VSATP in the > required order. > > Add p2m_handle_vmenter() to perform VMID management and issue TLB flushes > only when required (e.g. on VMID reuse or generation change). > > This provides the necessary infrastructure for correct p2m context switching > on RISC-V. > > Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com> Acked-by: Jan Beulich <jbeulich@suse.com>
© 2016 - 2026 Red Hat, Inc.