In order to prepare for dropping the cache_disable_lock, add a new hook
to struct mtrr_ops, allowing some global state to be set before calling
the .set hook on all active CPUs.
Move the setting of mtrr_state.var_ranges[] from generic_set_mtrr() to
the new prepare hook. Note that doing this only once, outside the
cache_disable_lock, is fine, as generic_set_mtrr() is called only via
set_mtrr(), and that call is protected by mtrr_mutex.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
arch/x86/kernel/cpu/mtrr/generic.c | 32 ++++++++++++++++++++++++------
arch/x86/kernel/cpu/mtrr/mtrr.c | 3 +++
arch/x86/kernel/cpu/mtrr/mtrr.h | 2 ++
3 files changed, 31 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ac95b19b01d0..bfd5a7ba17cb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -1057,6 +1057,31 @@ void mtrr_generic_set_state(void)
cache_enable(&state);
}
+/**
+ * generic_prepare_set_mtrr - set variable MTRR register data in mtrr_state
+ *
+ * @reg: The register to set.
+ * @base: The base address of the region.
+ * @size: The size of the region. If this is 0 the region is disabled.
+ * @type: The type of the region.
+ *
+ * Returns nothing.
+ */
+static void generic_prepare_set_mtrr(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
+{
+ struct mtrr_var_range *vr = &mtrr_state.var_ranges[reg];
+
+ if (size == 0) {
+ memset(vr, 0, sizeof(struct mtrr_var_range));
+ } else {
+ vr->base_lo = base << PAGE_SHIFT | type;
+ vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
+ vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
+ vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
+ }
+}
+
/**
* generic_set_mtrr - set variable MTRR register on the local CPU.
*
@@ -1085,13 +1110,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
* clear the relevant mask register to disable a range.
*/
mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
- memset(vr, 0, sizeof(struct mtrr_var_range));
} else {
- vr->base_lo = base << PAGE_SHIFT | type;
- vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
- vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
- vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
-
mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
}
@@ -1156,6 +1175,7 @@ int positive_have_wrcomb(void)
const struct mtrr_ops generic_mtrr_ops = {
.get = generic_get_mtrr,
.get_free_region = generic_get_free_region,
+ .prepare_set = generic_prepare_set_mtrr,
.set = generic_set_mtrr,
.validate_add_page = generic_validate_add_page,
.have_wrcomb = generic_have_wrcomb,
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 4b3d492afe17..32948fb4e742 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -175,6 +175,9 @@ static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size,
.smp_type = type
};
+ if (mtrr_if->prepare_set)
+ mtrr_if->prepare_set(reg, base, size, type);
+
stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
generic_rebuild_map();
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 2de3bd2f95d1..4d32c095cfc5 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -17,6 +17,8 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
u32 var_regs;
+ void (*prepare_set)(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type);
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*get)(unsigned int reg, unsigned long *base,
--
2.52.0