The current code to update the Xen part of the GDT when running a PV guest
relies on caching the direct map address of all the L1 tables used to map the
GDT and LDT, so that entries can be modified.

Introduce a new function that populates the per-domain region, either using
the recursive linear mappings when the target vCPU is the current one, or by
directly modifying the L1 table of the per-domain region.

Using such a function to populate per-domain addresses drops the need to
keep a reference to the per-domain L1 tables previously used to change the
per-domain mappings.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/domain.c                | 11 +++-
 xen/arch/x86/include/asm/desc.h      |  6 +-
 xen/arch/x86/include/asm/mm.h        |  2 +
 xen/arch/x86/include/asm/processor.h |  5 ++
 xen/arch/x86/mm.c                    | 88 ++++++++++++++++++++++++++++
 xen/arch/x86/smpboot.c               |  6 +-
 xen/arch/x86/traps.c                 | 10 ++--
 7 files changed, 113 insertions(+), 15 deletions(-)
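
As background for the fast path added in populate_perdomain_mapping() below,
here is a minimal sketch of the recursive linear mapping it relies on (not
part of the patch; the description of l1_linear_offset() in the comment is an
assumption about its usual construction, not a copy of Xen's headers):

    /*
     * With a recursive page-table slot, the L1 entries of the currently
     * loaded page tables appear as one flat array.  l1_linear_offset(va)
     * is essentially va's page number within the canonical VA range, so
     * the L1 entry mapping 'va' can be addressed directly:
     */
    l1_pgentry_t *pl1e = &__linear_l1_table[l1_linear_offset(va)];

    /* Writing *pl1e changes what 'va' maps to in the live page tables. */
    l1e_write(pl1e, l1e_from_mfn(mfn, __PAGE_HYPERVISOR_RW));
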
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 1f680bf176ee..0bd0ef7e40f4 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1953,9 +1953,14 @@ static always_inline bool need_full_gdt(const struct domain *d)
 
 static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
 {
-    l1e_write(pv_gdt_ptes(v) + FIRST_RESERVED_GDT_PAGE,
-              !is_pv_32bit_vcpu(v) ? per_cpu(gdt_l1e, cpu)
-                                   : per_cpu(compat_gdt_l1e, cpu));
+    ASSERT(v != current);
+
+    populate_perdomain_mapping(v,
+                               GDT_VIRT_START(v) +
+                               (FIRST_RESERVED_GDT_PAGE << PAGE_SHIFT),
+                               !is_pv_32bit_vcpu(v) ? &per_cpu(gdt_mfn, cpu)
+                                                    : &per_cpu(compat_gdt_mfn,
+                                                               cpu), 1);
 }
 
 static void load_full_gdt(const struct vcpu *v, unsigned int cpu)
diff --git a/xen/arch/x86/include/asm/desc.h b/xen/arch/x86/include/asm/desc.h
index a1e0807d97ed..33981bfca588 100644
--- a/xen/arch/x86/include/asm/desc.h
+++ b/xen/arch/x86/include/asm/desc.h
@@ -44,6 +44,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <xen/mm-frame.h>
+
 #define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
 
 /* Fix up the RPL of a guest segment selector. */
@@ -212,10 +214,10 @@ struct __packed desc_ptr {
 
 extern seg_desc_t boot_gdt[];
 DECLARE_PER_CPU(seg_desc_t *, gdt);
-DECLARE_PER_CPU(l1_pgentry_t, gdt_l1e);
+DECLARE_PER_CPU(mfn_t, gdt_mfn);
 extern seg_desc_t boot_compat_gdt[];
 DECLARE_PER_CPU(seg_desc_t *, compat_gdt);
-DECLARE_PER_CPU(l1_pgentry_t, compat_gdt_l1e);
+DECLARE_PER_CPU(mfn_t, compat_gdt_mfn);
 DECLARE_PER_CPU(bool, full_gdt_loaded);
 
 static inline void lgdt(const struct desc_ptr *gdtr)
diff --git a/xen/arch/x86/include/asm/mm.h b/xen/arch/x86/include/asm/mm.h
index 6c7e66ee21ab..b50a51327b2b 100644
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -603,6 +603,8 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int create_perdomain_mapping(struct domain *d, unsigned long va,
                              unsigned int nr, l1_pgentry_t **pl1tab,
                              struct page_info **ppg);
+void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
+                                mfn_t *mfn, unsigned long nr);
 void destroy_perdomain_mapping(struct domain *d, unsigned long va,
                                unsigned int nr);
 void free_perdomain_mappings(struct domain *d);
diff --git a/xen/arch/x86/include/asm/processor.h b/xen/arch/x86/include/asm/processor.h
index d247ef8dd226..82ee89f736c2 100644
--- a/xen/arch/x86/include/asm/processor.h
+++ b/xen/arch/x86/include/asm/processor.h
@@ -243,6 +243,11 @@ static inline unsigned long cr3_pa(unsigned long cr3)
     return cr3 & X86_CR3_ADDR_MASK;
 }
 
+static inline mfn_t cr3_mfn(unsigned long cr3)
+{
+    return maddr_to_mfn(cr3_pa(cr3));
+}
+
 static inline unsigned int cr3_pcid(unsigned long cr3)
 {
     return IS_ENABLED(CONFIG_PV) ? cr3 & X86_CR3_PCID_MASK : 0;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 3d5dd22b6c36..0abea792486c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -6423,6 +6423,94 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
     return rc;
 }
 
+void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
+                                mfn_t *mfn, unsigned long nr)
+{
+    l1_pgentry_t *l1tab = NULL, *pl1e;
+    const l3_pgentry_t *l3tab;
+    const l2_pgentry_t *l2tab;
+    struct domain *d = v->domain;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+    ASSERT(!nr || !l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
+
+    /* Use likely to force the optimization for the fast path. */
+    if ( likely(v == current) )
+    {
+        unsigned int i;
+
+        /* Ensure page-tables are from current (if current != curr_vcpu). */
+        sync_local_execstate();
+
+        /* Fast path: get L1 entries using the recursive linear mappings. */
+        pl1e = &__linear_l1_table[l1_linear_offset(va)];
+
+        for ( i = 0; i < nr; i++, pl1e++ )
+        {
+            if ( unlikely(perdomain_l1e_needs_freeing(*pl1e)) )
+            {
+                ASSERT_UNREACHABLE();
+                free_domheap_page(l1e_get_page(*pl1e));
+            }
+            l1e_write(pl1e, l1e_from_mfn(mfn[i], __PAGE_HYPERVISOR_RW));
+        }
+
+        return;
+    }
+
+    ASSERT(d->arch.perdomain_l3_pg);
+    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+
+    if ( unlikely(!(l3e_get_flags(l3tab[l3_table_offset(va)]) &
+                    _PAGE_PRESENT)) )
+    {
+        unmap_domain_page(l3tab);
+        gprintk(XENLOG_ERR, "unable to map at VA %lx: L3e not present\n", va);
+        ASSERT_UNREACHABLE();
+        domain_crash(d);
+
+        return;
+    }
+
+    l2tab = map_l2t_from_l3e(l3tab[l3_table_offset(va)]);
+
+    for ( ; nr--; va += PAGE_SIZE, mfn++ )
+    {
+        if ( !l1tab || !l1_table_offset(va) )
+        {
+            const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+
+            if ( unlikely(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT)) )
+            {
+                gprintk(XENLOG_ERR, "unable to map at VA %lx: L2e not present\n",
+                        va);
+                ASSERT_UNREACHABLE();
+                domain_crash(d);
+
+                break;
+            }
+
+            unmap_domain_page(l1tab);
+            l1tab = map_l1t_from_l2e(*pl2e);
+        }
+
+        pl1e = &l1tab[l1_table_offset(va)];
+
+        if ( unlikely(perdomain_l1e_needs_freeing(*pl1e)) )
+        {
+            ASSERT_UNREACHABLE();
+            free_domheap_page(l1e_get_page(*pl1e));
+        }
+
+        l1e_write(pl1e, l1e_from_mfn(*mfn, __PAGE_HYPERVISOR_RW));
+    }
+
+    unmap_domain_page(l1tab);
+    unmap_domain_page(l2tab);
+    unmap_domain_page(l3tab);
+}
+
 void destroy_perdomain_mapping(struct domain *d, unsigned long va,
                                unsigned int nr)
 {
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 79a79c54c304..a740a6402272 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -1059,8 +1059,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
     if ( gdt == NULL )
         goto out;
     per_cpu(gdt, cpu) = gdt;
-    per_cpu(gdt_l1e, cpu) =
-        l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+    per_cpu(gdt_mfn, cpu) = _mfn(virt_to_mfn(gdt));
     memcpy(gdt, boot_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     BUILD_BUG_ON(NR_CPUS > 0x10000);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
@@ -1069,8 +1068,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
     per_cpu(compat_gdt, cpu) = gdt = alloc_xenheap_pages(0, memflags);
     if ( gdt == NULL )
         goto out;
-    per_cpu(compat_gdt_l1e, cpu) =
-        l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+    per_cpu(compat_gdt_mfn, cpu) = _mfn(virt_to_mfn(gdt));
     memcpy(gdt, boot_compat_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
 #endif
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 487b8c5a78c5..a7f6fb611c34 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -92,10 +92,10 @@ DEFINE_PER_CPU(uint64_t, efer);
 static DEFINE_PER_CPU(unsigned long, last_extable_addr);
 
 DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, gdt);
-DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, gdt_l1e);
+DEFINE_PER_CPU_READ_MOSTLY(mfn_t, gdt_mfn);
 #ifdef CONFIG_PV32
 DEFINE_PER_CPU_READ_MOSTLY(seg_desc_t *, compat_gdt);
-DEFINE_PER_CPU_READ_MOSTLY(l1_pgentry_t, compat_gdt_l1e);
+DEFINE_PER_CPU_READ_MOSTLY(mfn_t, compat_gdt_mfn);
 #endif
 
 /* Master table, used by CPU0. */
@@ -2219,11 +2219,9 @@ void __init trap_init(void)
     init_ler();
 
     /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
-    this_cpu(gdt_l1e) =
-        l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
+    this_cpu(gdt_mfn) = _mfn(virt_to_mfn(boot_gdt));
     if ( IS_ENABLED(CONFIG_PV32) )
-        this_cpu(compat_gdt_l1e) =
-            l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
+        this_cpu(compat_gdt_mfn) = _mfn(virt_to_mfn(boot_compat_gdt));
 
     percpu_traps_init();
--
2.46.0
On Wed Jan 8, 2025 at 2:26 PM GMT, Roger Pau Monne wrote:
> [...]
> @@ -1953,9 +1953,14 @@ static always_inline bool need_full_gdt(const struct domain *d)
>
> static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
> {
> -    l1e_write(pv_gdt_ptes(v) + FIRST_RESERVED_GDT_PAGE,
> -              !is_pv_32bit_vcpu(v) ? per_cpu(gdt_l1e, cpu)
> -                                   : per_cpu(compat_gdt_l1e, cpu));
> +    ASSERT(v != current);

For this assert, and others below: IIUC, curr_vcpu == current when we're
properly switched, whereas when we're idling current == idle and curr_vcpu ==
prev_ctx. Granted, calling this in the middle of a lazy idle loop would be
weird, but would it make sense for PT consistency to use curr_vcpu here...

> [...]
> +void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
> +                                mfn_t *mfn, unsigned long nr)
> +{
> [...]
> +    /* Use likely to force the optimization for the fast path. */
> +    if ( likely(v == current) )

... and here? In particular I'd expect using curr_vcpu here means...

> +    {
> +        unsigned int i;
> +
> +        /* Ensure page-tables are from current (if current != curr_vcpu). */
> +        sync_local_execstate();

... this should not be needed.

> [...]
On Thu, Jan 09, 2025 at 09:55:44AM +0000, Alejandro Vallejo wrote:
> On Wed Jan 8, 2025 at 2:26 PM GMT, Roger Pau Monne wrote:
> > [...]
> > static void update_xen_slot_in_full_gdt(const struct vcpu *v, unsigned int cpu)
> > {
> > -    l1e_write(pv_gdt_ptes(v) + FIRST_RESERVED_GDT_PAGE,
> > -              !is_pv_32bit_vcpu(v) ? per_cpu(gdt_l1e, cpu)
> > -                                   : per_cpu(compat_gdt_l1e, cpu));
> > +    ASSERT(v != current);
>
> For this assert, and others below. IIUC, curr_vcpu == current when we're
> properly switched. When we're idling current == idle and curr_vcpu == prev_ctx.
>
> Granted, calling this in the middle of a lazy idle loop would be weird, but
> would it make sense for PT consistency to use curr_vcpu here...

Hm, this function is called in a very specific context, and the assert
intends to reflect that. TBH I could just drop it, as
populate_perdomain_mapping() will DTRT also when v == current. The
expectation for the context is also that current == curr_vcpu.

Note however that if v == current we would need a flush after the
populate_perdomain_mapping() call, since populate_perdomain_mapping()
doesn't perform any flushing of the modified entries. The main purpose of
the ASSERT() is to notice this.
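
Something like the following would then be needed (illustrative sketch only,
assuming flush_tlb_one_local() is the right primitive for a single VA):

    /* Hypothetical v == current caller; not code from this series. */
    populate_perdomain_mapping(v, va, &mfn, 1);
    if ( v == current )
        /* populate_perdomain_mapping() does no flushing itself. */
        flush_tlb_one_local(va);
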
> > [...]
> > +    /* Use likely to force the optimization for the fast path. */
> > +    if ( likely(v == current) )
>
> ... and here? In particular I'd expect using curr_vcpu here means...

I'm afraid not, this is a trap I fell into originally when doing this
series, as I indeed had v == curr_vcpu here (and no sync_local_execstate()
call).

However, as a result of an interrupt, a call to sync_local_execstate()
might happen, at which point the previous check of v == curr_vcpu becomes
stale.

> > +    {
> > +        unsigned int i;
> > +
> > +        /* Ensure page-tables are from current (if current != curr_vcpu). */
> > +        sync_local_execstate();
>
> ... this should not be needed.

As kind of mentioned above, this is required to ensure the page-tables are
in sync with the vCPU in current, and cannot change as a result of an
interrupt triggering a call to sync_local_execstate().

Otherwise the page-tables could change during or after the call to
populate_perdomain_mapping(), and the mappings could end up being created
on the wrong page-tables.
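
To make the hazard concrete, a sketch of the unsafe pattern (assumed flow
for illustration; new_l1e is a placeholder, not code from the series):

    /* Unsafe: check the lazily-switched state without pinning it. */
    if ( v == curr_vcpu )    /* v's page tables appear to be loaded... */
        /*
         * ...but an interrupt taken here may call sync_local_execstate(),
         * which switches to current's page tables, so the write below
         * would land in the wrong L1 table.
         */
        l1e_write(&__linear_l1_table[l1_linear_offset(va)], new_l1e);

    /* Safe (what the patch does): force the switch up front. */
    if ( v == current )
    {
        sync_local_execstate();    /* now curr_vcpu == current for sure */
        l1e_write(&__linear_l1_table[l1_linear_offset(va)], new_l1e);
    }
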
Thanks, Roger.
On Fri Jan 10, 2025 at 2:29 PM GMT, Roger Pau Monné wrote:
> On Thu, Jan 09, 2025 at 09:55:44AM +0000, Alejandro Vallejo wrote:
> > On Wed Jan 8, 2025 at 2:26 PM GMT, Roger Pau Monne wrote:
> > > [...]
> > > +    /* Use likely to force the optimization for the fast path. */
> > > +    if ( likely(v == current) )
> >
> > ... and here? In particular I'd expect using curr_vcpu here means...
>
> I'm afraid not, this is a trap I fell into originally when doing this
> series, as I indeed had v == curr_vcpu here (and no sync_local_execstate()
> call).
>
> However, as a result of an interrupt, a call to sync_local_execstate()
> might happen, at which point the previous check of v == curr_vcpu becomes
> stale.

Wow, that's nasty! More than fair enough then. Guess the XSAVE wrappers (and
more generally all vCPU-local memory accessors) will have to take this into
account before poking into the contents of the perdomain region.
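
For instance, a hypothetical guard (an assumption about future work, nothing
like this exists yet) might look like:

    /* Hypothetical vCPU-local accessor guard, for illustration only. */
    static void *map_vcpu_local(const struct vcpu *v, unsigned long va)
    {
        ASSERT(v == current);
        sync_local_execstate();    /* pin current's page tables first */
        return (void *)va;         /* per-domain VA now maps v's frames */
    }
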
Cheers,
Alejandro
On 08.01.2025 15:26, Roger Pau Monne wrote:
> [...]
> Using such a function to populate per-domain addresses drops the need to
> keep a reference to the per-domain L1 tables previously used to change the
> per-domain mappings.

Well, yes. You now record MFNs instead. And you do so at the expense of about
100 lines of new code. I'm afraid I'm lacking justification for this price to
be paid.

> @@ -2219,11 +2219,9 @@ void __init trap_init(void)
>     init_ler();
>
>     /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
> -    this_cpu(gdt_l1e) =
> -        l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
> +    this_cpu(gdt_mfn) = _mfn(virt_to_mfn(boot_gdt));
>     if ( IS_ENABLED(CONFIG_PV32) )
> -        this_cpu(compat_gdt_l1e) =
> -            l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
> +        this_cpu(compat_gdt_mfn) = _mfn(virt_to_mfn(boot_compat_gdt));

The comment's going stale this way.

Jan
On Thu, Jan 09, 2025 at 10:10:20AM +0100, Jan Beulich wrote:
> On 08.01.2025 15:26, Roger Pau Monne wrote:
> > [...]
> > Using such a function to populate per-domain addresses drops the need to
> > keep a reference to the per-domain L1 tables previously used to change the
> > per-domain mappings.
>
> Well, yes. You now record MFNs instead. And you do so at the expense of about
> 100 lines of new code. I'm afraid I'm lacking justification for this price to
> be paid.

Oh, I should probably have been more explicit in the commit message. The
cover letter kind of covers this: the objective is to remove the stashing of
L1 page-table references in the domain struct. Currently the per-vCPU GDT L1
table references are stored in the domain struct, so PTEs can be easily
manipulated.

When moving the per-domain slot to being per-vCPU, this stashing of the L1
tables will become much more complex, hence I wanted to get rid of it.

With the introduction of populate_perdomain_mapping() I'm attempting to get
rid of all those L1 references in the domain struct, by having a generic
function that allows modifying the linear address range that belongs to the
per-domain slot.

See for example how patch 8 gets rid of all the l1_pgentry_t GDT/LDT
references in the domain struct, and how patch 9 makes the
create_perdomain_mapping() interface much simpler. All this is built upon
the addition of the populate_perdomain_mapping() helper and the dropping of
the l1_pgentry_t references in the domain struct.

Hope this helps clarify the intent of the change here.

> > @@ -2219,11 +2219,9 @@ void __init trap_init(void)
> >     init_ler();
> >
> >     /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
> > -    this_cpu(gdt_l1e) =
> > -        l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
> > +    this_cpu(gdt_mfn) = _mfn(virt_to_mfn(boot_gdt));
> >     if ( IS_ENABLED(CONFIG_PV32) )
> > -        this_cpu(compat_gdt_l1e) =
> > -            l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
> > +        this_cpu(compat_gdt_mfn) = _mfn(virt_to_mfn(boot_compat_gdt));
>
> The comment's going stale this way.

Right, the cache is still there but using a different field name. I can
adjust it to:

    /* Cache {,compat_}gdt_mfn now that physical relocation is done. */

Thanks, Roger.