Add shadow and hap implementation-specific helpers to perform guest
TLB flushes. Note that the code for both is exactly the same at the
moment, and is copied from hvm_flush_vcpu_tlb. Further patches will
add implementation-specific optimizations to each helper.
No functional change intended.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
xen/arch/x86/hvm/hvm.c | 51 ++----------------------------
xen/arch/x86/mm/hap/hap.c | 54 ++++++++++++++++++++++++++++++++
xen/arch/x86/mm/shadow/common.c | 55 +++++++++++++++++++++++++++++++++
xen/arch/x86/mm/shadow/multi.c | 1 -
xen/include/asm-x86/hap.h | 3 ++
xen/include/asm-x86/shadow.h | 12 +++++++
6 files changed, 127 insertions(+), 49 deletions(-)
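For context (not part of the patch itself): callers of hvm_flush_vcpu_tlb()
pass a predicate selecting which vCPUs to flush, and after this change the
helper merely dispatches to the shadow or HAP variant. A minimal, hypothetical
usage sketch, reusing the always_flush() predicate already present in hvm.c
(visible as context in the hunk below); flush_all_tlbs() is an illustrative
name, not an existing function:

    /* Predicate selecting every vCPU of the current domain. */
    static bool always_flush(void *ctxt, struct vcpu *v)
    {
        return true;
    }

    /* Hypothetical handler flushing the TLB of all vCPUs of the current
     * domain. hvm_flush_vcpu_tlb() returns false when the deadlock-avoidance
     * trylock fails; the caller is then expected to retry, e.g. by creating
     * a hypercall continuation. */
    static int flush_all_tlbs(void)
    {
        return hvm_flush_vcpu_tlb(always_flush, NULL) ? 0 : -ERESTART;
    }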
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0b93609a82..96c419f0ef 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3986,55 +3986,10 @@ static void hvm_s3_resume(struct domain *d)
bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
void *ctxt)
{
- static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
- cpumask_t *mask = &this_cpu(flush_cpumask);
- struct domain *d = current->domain;
- struct vcpu *v;
-
- /* Avoid deadlock if more than one vcpu tries this at the same time. */
- if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
- return false;
-
- /* Pause all other vcpus. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_pause_nosync(v);
-
- /* Now that all VCPUs are signalled to deschedule, we wait... */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- while ( !vcpu_runnable(v) && v->is_running )
- cpu_relax();
-
- /* All other vcpus are paused, safe to unlock now. */
- spin_unlock(&d->hypercall_deadlock_mutex);
-
- cpumask_clear(mask);
-
- /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
- for_each_vcpu ( d, v )
- {
- unsigned int cpu;
-
- if ( !flush_vcpu(ctxt, v) )
- continue;
-
- paging_update_cr3(v, false);
+ struct domain *currd = current->domain;
- cpu = read_atomic(&v->dirty_cpu);
- if ( is_vcpu_dirty_cpu(cpu) )
- __cpumask_set_cpu(cpu, mask);
- }
-
- /* Flush TLBs on all CPUs with dirty vcpu state. */
- flush_tlb_mask(mask);
-
- /* Done. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_unpause(v);
-
- return true;
+ return shadow_mode_enabled(currd) ? shadow_flush_tlb(flush_vcpu, ctxt)
+ : hap_flush_tlb(flush_vcpu, ctxt);
}
static bool always_flush(void *ctxt, struct vcpu *v)
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 3d93f3451c..6894c1aa38 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -669,6 +669,60 @@ static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
hvm_update_guest_cr3(v, noflush);
}
+bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
const struct paging_mode *
hap_paging_get_mode(struct vcpu *v)
{
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 6212ec2c4a..ee90e55b41 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3357,6 +3357,61 @@ out:
return rc;
}
+/* Flush the TLB of the selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
/**************************************************************************/
/* Shadow-control XEN_DOMCTL dispatcher */
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 26798b317c..dfe264cf83 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -4157,7 +4157,6 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
if ( do_locking ) paging_unlock(v->domain);
}
-
/**************************************************************************/
/* Functions to revoke guest rights */
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index b94bfb4ed0..0c6aa26b9b 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -46,6 +46,9 @@ int hap_track_dirty_vram(struct domain *d,
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
+bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#endif /* XEN_HAP_H */
/*
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 907c71f497..3c1f6df478 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -95,6 +95,10 @@ void shadow_blow_tables_per_domain(struct domain *d);
int shadow_set_allocation(struct domain *d, unsigned int pages,
bool *preempted);
+/* Flush the TLB of the selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#else /* !CONFIG_SHADOW_PAGING */
#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
@@ -106,6 +110,14 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
#define shadow_set_allocation(d, pages, preempted) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
+static inline bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt,
+ struct vcpu *v),
+ void *ctxt)
+{
+ ASSERT_UNREACHABLE();
+ return -EOPNOTSUPP;
+}
+
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
int fast, int all) {}
--
2.25.0
On Mon, Jan 27, 2020 at 07:11:11PM +0100, Roger Pau Monne wrote:
[...]
>
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index 26798b317c..dfe264cf83 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -4157,7 +4157,6 @@ sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
> if ( do_locking ) paging_unlock(v->domain);
> }
>
> -
Stray change.
> /**************************************************************************/
> /* Functions to revoke guest rights */
>
> diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
> index b94bfb4ed0..0c6aa26b9b 100644
> --- a/xen/include/asm-x86/hap.h
> +++ b/xen/include/asm-x86/hap.h
> @@ -46,6 +46,9 @@ int hap_track_dirty_vram(struct domain *d,
> extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
> int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
>
> +bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> + void *ctxt);
> +
> #endif /* XEN_HAP_H */
>
> /*
> diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
> index 907c71f497..3c1f6df478 100644
> --- a/xen/include/asm-x86/shadow.h
> +++ b/xen/include/asm-x86/shadow.h
> @@ -95,6 +95,10 @@ void shadow_blow_tables_per_domain(struct domain *d);
> int shadow_set_allocation(struct domain *d, unsigned int pages,
> bool *preempted);
>
> +/* Flush the TLB of the selected vCPUs. */
> +bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
> + void *ctxt);
> +
> #else /* !CONFIG_SHADOW_PAGING */
>
> #define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
> @@ -106,6 +110,14 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
> #define shadow_set_allocation(d, pages, preempted) \
> ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
>
> +static inline bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt,
> + struct vcpu *v),
> + void *ctxt)
> +{
> + ASSERT_UNREACHABLE();
> + return -EOPNOTSUPP;
This function needs to return true/false per its signature.
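For illustration only, a corrected stub (a sketch that keeps the assertion
and matches the bool return type) could look like:

    static inline bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt,
                                                           struct vcpu *v),
                                        void *ctxt)
    {
        /* Shadow paging is compiled out, so this must never be reached. */
        ASSERT_UNREACHABLE();
        return false;
    }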
With this fixed:
Reviewed-by: Wei Liu <wl@xen.org>