When microcode staging is initiated, operations are carried out through
an MMIO interface. Each interface is independently discoverable per CPU
via the IA32_MCU_STAGING_MBOX_ADDR MSR, which points to a set of
dword-sized registers.
Software must first ensure the microcode image is dword-aligned, then
proceed to stage the update for each exposed MMIO space as specified.
Follow these steps to arrange the staging process: identify each unique
MMIO interface by iterating over the CPUs and reading the MSR for each
one. While this process can be tedious, it remains simple enough and
acceptable in the slow path.
Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
---
Note:
1. Initially, a per-package and parallel staging invocation approach was
considered, but it seemed overly complex. Dave helped to identify a
simpler way.
2. Ideally, the staging_work() function would be as simple as a single
WRMSR execution. If this were the case, the staging flow could be
completed with this patch. From this perspective, the software handling
for interacting with the staging firmware has been separated from this
vendor code and moved into a new file dedicated to staging logic.
---
arch/x86/kernel/cpu/microcode/intel.c | 50 ++++++++++++++++++++++++
arch/x86/kernel/cpu/microcode/internal.h | 5 +++
2 files changed, 55 insertions(+)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index f3d534807d91..03c4b0e7e97c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -299,6 +299,55 @@ static __init struct microcode_intel *scan_microcode(void *data, size_t size,
return size ? NULL : patch;
}
+static inline u64 staging_addr(u32 cpu)
+{
+ u32 lo, hi;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &lo, &hi);
+ return lo | ((u64)hi << 32);
+}
+
+static bool need_staging(u64 *mmio_addrs, u64 pa)
+{
+ unsigned int i;
+
+ for (i = 0; mmio_addrs[i] != 0; i++) {
+ if (mmio_addrs[i] == pa)
+ return false;
+ }
+ mmio_addrs[i] = pa;
+ return true;
+}
+
+static void staging_microcode(void)
+{
+ u64 *mmio_addrs, mmio_pa;
+ unsigned int totalsize;
+ int cpu;
+
+ totalsize = get_totalsize(&ucode_patch_late->hdr);
+ if (!IS_ALIGNED(totalsize, sizeof(u32)))
+ return;
+
+ mmio_addrs = kcalloc(nr_cpu_ids, sizeof(*mmio_addrs), GFP_KERNEL);
+ if (WARN_ON_ONCE(!mmio_addrs))
+ return;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ mmio_pa = staging_addr(cpu);
+
+ if (need_staging(mmio_addrs, mmio_pa) &&
+ !staging_work(mmio_pa, ucode_patch_late, totalsize)) {
+ pr_err("Error: staging failed.\n");
+ goto out;
+ }
+ }
+
+ pr_info("Staging succeeded.\n");
+out:
+ kfree(mmio_addrs);
+}
+
static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
struct microcode_intel *mc,
u32 *cur_rev)
@@ -627,6 +676,7 @@ static struct microcode_ops microcode_intel_ops = {
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode_late,
.finalize_late_load = finalize_late_load,
+ .staging_microcode = staging_microcode,
.use_nmi = IS_ENABLED(CONFIG_X86_64),
};
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index cb58e83e4934..06c3c8db4ceb 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -120,6 +120,11 @@ void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
+static inline bool staging_work(u64 mmio_pa, void *ucode_ptr, unsigned int totalsize)
+{
+ pr_debug_once("Need to implement the Staging code.\n");
+ return false;
+}
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
--
2.43.0
On Tue, Oct 01, 2024 at 09:10:39AM -0700, Chang S. Bae wrote: > +static inline u64 staging_addr(u32 cpu) > +{ > + u32 lo, hi; > + > + rdmsr_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &lo, &hi); > + return lo | ((u64)hi << 32); > +} A single usage site. Move its code there and get rid of the function. > + > +static bool need_staging(u64 *mmio_addrs, u64 pa) > +{ > + unsigned int i; > + > + for (i = 0; mmio_addrs[i] != 0; i++) { > + if (mmio_addrs[i] == pa) > + return false; > + } > + mmio_addrs[i] = pa; This is a weird function - its name is supposed to mean it queries something but then it has side effects too. > + return true; > +} > + > +static void staging_microcode(void) stage_microcode(). Functions should have verbs in their name and have the meaning of a "do-something". > +{ > + u64 *mmio_addrs, mmio_pa; > + unsigned int totalsize; > + int cpu; > + > + totalsize = get_totalsize(&ucode_patch_late->hdr); > + if (!IS_ALIGNED(totalsize, sizeof(u32))) > + return; > + > + mmio_addrs = kcalloc(nr_cpu_ids, sizeof(*mmio_addrs), GFP_KERNEL); > + if (WARN_ON_ONCE(!mmio_addrs)) > + return; > + > + for_each_cpu(cpu, cpu_online_mask) { Oh great, and someone went and offlined one of those CPUs right here. Fun. > + mmio_pa = staging_addr(cpu); > + > + if (need_staging(mmio_addrs, mmio_pa) && > + !staging_work(mmio_pa, ucode_patch_late, totalsize)) { do_stage() > + pr_err("Error: staging failed.\n"); ... on CPU%d, err_val: 0x%x"\n" perhaps? For more info debugging something like that? > + goto out; > + } > + } > + > + pr_info("Staging succeeded.\n"); "Staging of patch revision 0x%x succeeded.\n"... more user-friendly. 
> +out: > + kfree(mmio_addrs); > +} > + > static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci, > struct microcode_intel *mc, > u32 *cur_rev) > @@ -627,6 +676,7 @@ static struct microcode_ops microcode_intel_ops = { > .collect_cpu_info = collect_cpu_info, > .apply_microcode = apply_microcode_late, > .finalize_late_load = finalize_late_load, > + .staging_microcode = staging_microcode, > .use_nmi = IS_ENABLED(CONFIG_X86_64), > }; > > diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h > index cb58e83e4934..06c3c8db4ceb 100644 > --- a/arch/x86/kernel/cpu/microcode/internal.h > +++ b/arch/x86/kernel/cpu/microcode/internal.h > @@ -120,6 +120,11 @@ void load_ucode_intel_bsp(struct early_load_data *ed); > void load_ucode_intel_ap(void); > void reload_ucode_intel(void); > struct microcode_ops *init_intel_microcode(void); > +static inline bool staging_work(u64 mmio_pa, void *ucode_ptr, unsigned int totalsize) > +{ > + pr_debug_once("Need to implement the Staging code.\n"); > + return false; > +} > #else /* CONFIG_CPU_SUP_INTEL */ You better have an empty stub for that work function here too. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On 11/4/2024 3:16 AM, Borislav Petkov wrote: > On Tue, Oct 01, 2024 at 09:10:39AM -0700, Chang S. Bae wrote: >> + >> +static bool need_staging(u64 *mmio_addrs, u64 pa) >> +{ >> + unsigned int i; >> + >> + for (i = 0; mmio_addrs[i] != 0; i++) { >> + if (mmio_addrs[i] == pa) >> + return false; >> + } >> + mmio_addrs[i] = pa; > > This is a weird function - its name is supposed to mean it queries something > but then it has side effects too. I think I've carved out a bit more out of the loop and convoluted them into a single function. Instead, let the helper simply find the position in the array, static unsigned int find_pos(u64 *mmio_addrs, u64 mmio_pa) { unsigned int i; for (i = 0; mmio_addrs[i] != 0; i++) { if (mmio_addrs[i] == mmio_pa) break; } return i; } and update the array from there: static void stage_microcode(void) { unsigned int pos; ... for_each_cpu(cpu, cpu_online_mask) { /* set 'mmio_pa' from RDMSR */ pos = find_pos(mmio_addrs, mmio_pa); if (mmio_addrs[pos] == mmio_pa) continue; /* do staging */ mmio_addrs[pos] = mmio_pa; } ... } Or, even the loop code can include it: for_each_cpu(...) { /* set 'mmio_pa' from RDMSR */ /* Find either the last position or a match */ for (i = 0; mmio_addrs[i] != 0 && mmio_addrs[i] != mmio_pa; i++); if (mmio_addrs[i] == mmio_pa) continue; /* do staging */ mmio_addrs[i] = mmio_pa; } Maybe, if this unusual one line for-loop is not an issue, this can make the code simple. Otherwise, at least the helper should remain doing one thing. >> + for_each_cpu(cpu, cpu_online_mask) { > > Oh great, and someone went and offlined one of those CPUs right here. Fun. Yes, offlining matters. Let me summarize a few things related to the patchset for the record: * The staging flow is tiggered by load_late_locked(), which already holds cpus_read_lock() [1]. * When any SMT primary threads is offlined, setup_cpus() makes it exit right away [2]. 
* When an offlined CPU is brought up online, it follows the early loading path, which does not include staging. [1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/microcode/core.c#n705 [2]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/microcode/core.c#n658 > >> + mmio_pa = staging_addr(cpu); >> + >> + if (need_staging(mmio_addrs, mmio_pa) && >> + !staging_work(mmio_pa, ucode_patch_late, totalsize)) { > > do_stage() > >> + pr_err("Error: staging failed.\n"); > > ... on CPU%d, err_val: 0x%x"\n" > > perhaps? > > For more info debugging something like that? Maybe, either one of two ways: The work function remains returning bool value, then it can print the error message from there. bool do_stage(...) { ... pr_err("Error: staging failed on CPU%d, with %s.\n", smp_processor_id(), state == UCODE_TIMEOUT ? "timeout" : "error"); return false } Then, this function may simply return on error. static void stage_microcode(void) { ... if (!do_stage(...)) goto out; ... out: kfree(mmio_addrs); } Or, let the work function return a value like an enum ucode_state indicating the status, and print the error message here. static void stage_microcode(void) { enum ucode_state state; ... state = do_stage(...); if (state != UCODE_OK) goto err_out; err_out: pr_err("Error: ...); out: ... } > >> + goto out; >> + } >> + } >> + >> + pr_info("Staging succeeded.\n"); > > "Staging of patch revision 0x%x succeeded.\n"... > > more user-friendly. Yes, that seems to be useful to put it on: + pr_info("Staging of patch revision 0x%x succeeded.\n", + ((struct microcode_header_intel *)ucode_patch_late)->rev); If successful, users will see something like this: [ 25.352716] microcode: Staging is available. [ 25.357684] microcode: Current revision: 0x1234 [ 136.203269] microcode: Staging of patch revision 0xabcd succeeded. 
[ 137.398491] microcode: load: updated on xx primary CPUs with yy siblings [ 137.427386] microcode: revision: 0x1234 -> 0xabcd Thanks, Chang
On Wed, Nov 06 2024 at 10:28, Chang S. Bae wrote: > On 11/4/2024 3:16 AM, Borislav Petkov wrote: >> On Tue, Oct 01, 2024 at 09:10:39AM -0700, Chang S. Bae wrote: >>> + >>> +static bool need_staging(u64 *mmio_addrs, u64 pa) >>> +{ >>> + unsigned int i; >>> + >>> + for (i = 0; mmio_addrs[i] != 0; i++) { >>> + if (mmio_addrs[i] == pa) >>> + return false; >>> + } >>> + mmio_addrs[i] = pa; >> >> This is a weird function - its name is supposed to mean it queries something >> but then it has side effects too. > > I think I've carved out a bit more out of the loop and convoluted them > into a single function. > > Instead, let the helper simply find the position in the array, > > static unsigned int find_pos(u64 *mmio_addrs, u64 mmio_pa) > { > unsigned int i; > > for (i = 0; mmio_addrs[i] != 0; i++) { > if (mmio_addrs[i] == mmio_pa) > break; > } > return i; > } > > and update the array from there: > > static void stage_microcode(void) > { > unsigned int pos; > ... > for_each_cpu(cpu, cpu_online_mask) { > /* set 'mmio_pa' from RDMSR */ > > pos = find_pos(mmio_addrs, mmio_pa); > if (mmio_addrs[pos] == mmio_pa) > continue; > > /* do staging */ > > mmio_addrs[pos] = mmio_pa; > } > ... > } > > Or, even the loop code can include it: > > for_each_cpu(...) { > /* set 'mmio_pa' from RDMSR */ > > /* Find either the last position or a match */ > for (i = 0; mmio_addrs[i] != 0 && mmio_addrs[i] != > mmio_pa; i++); > > if (mmio_addrs[i] == mmio_pa) > continue; > > /* do staging */ > > mmio_addrs[i] = mmio_pa; > } This looks all overly complicated. The documentation says: "There is one set of mailbox registers and internal staging buffers per physical processor package. Therefore, the IA32_MCU_STAGING_MBOX_ADDR MSR is package-scoped and will provide a different physical address on each physical package." So why going through loops and hoops? 
pkg_id = UINT_MAX; for_each_online_cpu(cpu) { if (topology_logical_package_id(cpu) == pkg_id) continue; pkg_id = topology_logical_package_id(cpu); rdmsrl_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &pa); staging_work(pa, ucode_patch_late, totalsize); } Something like that should just work, no? Thanks, tglx
On 11/6/24 17:12, Thomas Gleixner wrote: > This looks all overly complicated. The documentation says: > > "There is one set of mailbox registers and internal staging buffers per > physical processor package. Therefore, the IA32_MCU_STAGING_MBOX_ADDR > MSR is package-scoped and will provide a different physical address on > each physical package." > > So why going through loops and hoops? I'm to blame for that one. It was the smallest amount of code I could think of at the time that could work when all the CPUs in a package aren't consecutively numbered. It also happens to work even if the topology parsing or firmware goes wonky.
On 11/6/2024 5:12 PM, Thomas Gleixner wrote: > > This looks all overly complicated. The documentation says: > > "There is one set of mailbox registers and internal staging buffers per > physical processor package. Therefore, the IA32_MCU_STAGING_MBOX_ADDR > MSR is package-scoped and will provide a different physical address on > each physical package." > > So why going through loops and hoops? While the initial approach was like the one below, the aspect I bought was to avoid relying on topology knowledge by simply searching for unique addresses. But you're right -- this introduces unnecessary loops, complicating the code anyway. I should have explicitly highlighted this as a review point, so thank you for taking a closer look and correcting the approach. Given that the scope is clearly stated in the spec as packaged-scoped and should be permanent, I think there is no question in leveraging it to make the code simple as you suggested: > > pkg_id = UINT_MAX; > > for_each_online_cpu(cpu) { > if (topology_logical_package_id(cpu) == pkg_id) > continue; > pkg_id = topology_logical_package_id(cpu); > > rdmsrl_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &pa); > staging_work(pa, ucode_patch_late, totalsize); > } > > Something like that should just work, no? Yes, indeed. Then I observed the package number repeated according to the CPU number mapping. For example, all SMT sibling threads map in the later CPU numbers. Unless staging redundancy is intentional, it can be avoided by tracking package ids. But, in this case, the loop may be streamlined to SMT primary threads instead of all online CPUs, since core::setup_cpus() already ensures primary threads are online. Perhaps, /* * The MMIO address is unique per package, and all the SMT * primary threads are ensured online. Find staging addresses * by their package ids. */ for_each_cpu(cpu, cpu_primary_thread_mask) { ... } Thanks, Chang
On 11/4/24 03:16, Borislav Petkov wrote: > On Tue, Oct 01, 2024 at 09:10:39AM -0700, Chang S. Bae wrote: >> +static inline u64 staging_addr(u32 cpu) >> +{ >> + u32 lo, hi; >> + >> + rdmsr_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &lo, &hi); >> + return lo | ((u64)hi << 32); >> +} > A single usage site. Move its code there and get rid of the function. Yeah, and it'll look a lot nicer if you use: rdmsrl_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &addr); and don't have to do the high/lo munging.
On 11/4/2024 8:08 AM, Dave Hansen wrote: > On 11/4/24 03:16, Borislav Petkov wrote: >> On Tue, Oct 01, 2024 at 09:10:39AM -0700, Chang S. Bae wrote: >>> +static inline u64 staging_addr(u32 cpu) >>> +{ >>> + u32 lo, hi; >>> + >>> + rdmsr_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &lo, &hi); >>> + return lo | ((u64)hi << 32); >>> +} >> A single usage site. Move its code there and get rid of the function. > > Yeah, and it'll look a lot nicer if you use: > > rdmsrl_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &addr); > > and don't have to do the high/lo munging. Oh, silly me missed this function. Thanks.
On 11/4/2024 10:34 AM, Chang S. Bae wrote: > On 11/4/2024 8:08 AM, Dave Hansen wrote: >> On 11/4/24 03:16, Borislav Petkov wrote: >>> On Tue, Oct 01, 2024 at 09:10:39AM -0700, Chang S. Bae wrote: >>>> +static inline u64 staging_addr(u32 cpu) >>>> +{ >>>> + u32 lo, hi; >>>> + >>>> + rdmsr_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &lo, &hi); >>>> + return lo | ((u64)hi << 32); >>>> +} >>> A single usage site. Move its code there and get rid of the function. >> >> Yeah, and it'll look a lot nicer if you use: >> >> rdmsrl_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &addr); >> >> and don't have to do the high/lo munging. > > Oh, silly me missed this function. Thanks. Okay, I took another look and found a similar case: diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 0f04feb6cafa..b942cd11e179 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict; static bool boost_state(unsigned int cpu) { - u32 lo, hi; u64 msr; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: - rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); - msr = lo | ((u64)hi << 32); + rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); case X86_VENDOR_HYGON: case X86_VENDOR_AMD: - rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); - msr = lo | ((u64)hi << 32); + rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr); return !(msr & MSR_K7_HWCR_CPB_DIS); } return false; I'll be following up with a patch to clean this up as well. Thanks, Chang
Replace the 32-bit MSR access function with a 64-bit variant to simplify
the call site, eliminating unnecessary 32-bit value manipulations.
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
---
I've received feedback to use rdmsrl_on_cpu() instead of rdmsr_on_cpu()
for similar code in my feature-enabling series [1]. While auditing the
tree, I found this case as well, so here's another cleanup.
[1] https://lore.kernel.org/lkml/e9afefb7-3c4e-48ee-aab1-2f338c4e989d@intel.com/
---
drivers/cpufreq/acpi-cpufreq.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0f04feb6cafa..b942cd11e179 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
- u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
--
2.34.1
On Wed, Nov 6, 2024 at 7:23 PM Chang S. Bae <chang.seok.bae@intel.com> wrote: > > Replace the 32-bit MSR access function with a 64-bit variant to simplify > the call site, eliminating unnecessary 32-bit value manipulations. > > Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com> > Cc: Rafael J. Wysocki <rafael@kernel.org> > Cc: Viresh Kumar <viresh.kumar@linaro.org> > Cc: Dave Hansen <dave.hansen@intel.com> > Cc: Borislav Petkov <bp@alien8.de> > --- > I've received feedback to use rdmsrl_on_cpu() instead of rdmsr_on_cpu() > for similar code in my feature-enabling series [1]. While auditing the > tree, I found this case as well, so here's another cleanup. > > [1] https://lore.kernel.org/lkml/e9afefb7-3c4e-48ee-aab1-2f338c4e989d@intel.com/ > --- > drivers/cpufreq/acpi-cpufreq.c | 7 ++----- > 1 file changed, 2 insertions(+), 5 deletions(-) > > diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c > index 0f04feb6cafa..b942cd11e179 100644 > --- a/drivers/cpufreq/acpi-cpufreq.c > +++ b/drivers/cpufreq/acpi-cpufreq.c > @@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict; > > static bool boost_state(unsigned int cpu) > { > - u32 lo, hi; > u64 msr; > > switch (boot_cpu_data.x86_vendor) { > case X86_VENDOR_INTEL: > case X86_VENDOR_CENTAUR: > case X86_VENDOR_ZHAOXIN: > - rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); > - msr = lo | ((u64)hi << 32); > + rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr); > return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); > case X86_VENDOR_HYGON: > case X86_VENDOR_AMD: > - rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); > - msr = lo | ((u64)hi << 32); > + rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr); > return !(msr & MSR_K7_HWCR_CPB_DIS); > } > return false; > -- Applied as 6.13 material, thanks!
© 2016 - 2024 Red Hat, Inc.