arch/x86/include/asm/msr-index.h | 10 + arch/x86/include/asm/topology.h | 12 +- arch/x86/kernel/cpu/microcode/amd.c | 107 +++++---- arch/x86/kernel/cpu/microcode/core.c | 13 +- arch/x86/kernel/cpu/microcode/intel.c | 362 +++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/microcode/internal.h | 4 +- arch/x86/kernel/cpu/topology.c | 4 - arch/x86/kernel/cpu/topology_common.c | 3 + arch/x86/kernel/smpboot.c | 3 - 9 files changed, 465 insertions(+), 53 deletions(-)
Hi Linus,
please pull the x86/microcode lineup for v6.19-rc1.
There will be a merge conflict with your tree because we had to expedite
urgent fixes.
The resolution - at the end of this message - consists basically of three new
microcode revisions getting added to the Entrysign cutoff check.
Thx.
---
The following changes since commit 84dfce65a7ae7b11c7b13285a1b23e9a94ad37b7:
x86/bugs: Remove dead code which might prevent from building (2025-10-24 09:42:00 -0700)
are available in the Git repository at:
ssh://git@gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip tags/x86_microcode_for_v6.19_rc1
for you to fetch changes up to ca8313fd83399ea1d18e695c2ae9b259985c9e1f:
x86/microcode: Mark early_parse_cmdline() as __init (2025-10-30 14:33:31 +0100)
----------------------------------------------------------------
- Add microcode staging support on Intel: it moves the loading of the
  microcode blobs themselves to a non-critical path so that microcode
  loading latencies are kept to a minimum. The actual "directing" of the
  hardware to load microcode is the only step which is done on the
  critical path. This scheme is also opportunistic in that, on a failure,
  the machinery falls back to normal loading
- Add the capability to the AMD side of the loader to select one of two
  per-family/model/stepping patches: one pre-Entrysign and the other
  post-Entrysign, with the goal of taking care of machines which
  haven't updated their BIOS yet - something they should absolutely do,
  as that is the only proper Entrysign fix
- Other small cleanups and fixlets
----------------------------------------------------------------
Borislav Petkov (AMD) (2):
Merge tag 'x86_urgent_for_v6.18_rc3' into x86/microcode
x86/microcode/AMD: Select which microcode patch to load
Chang S. Bae (7):
x86/cpu/topology: Make primary thread mask available with SMP=n
x86/microcode: Introduce staging step to reduce late-loading time
x86/microcode/intel: Establish staging control logic
x86/microcode/intel: Define staging state struct
x86/microcode/intel: Implement staging handler
x86/microcode/intel: Support mailbox transfer
x86/microcode/intel: Enable staging when available
Yu Peng (1):
x86/microcode: Mark early_parse_cmdline() as __init
arch/x86/include/asm/msr-index.h | 10 +
arch/x86/include/asm/topology.h | 12 +-
arch/x86/kernel/cpu/microcode/amd.c | 107 +++++----
arch/x86/kernel/cpu/microcode/core.c | 13 +-
arch/x86/kernel/cpu/microcode/intel.c | 362 +++++++++++++++++++++++++++++++
arch/x86/kernel/cpu/microcode/internal.h | 4 +-
arch/x86/kernel/cpu/topology.c | 4 -
arch/x86/kernel/cpu/topology_common.c | 3 +
arch/x86/kernel/smpboot.c | 3 -
9 files changed, 465 insertions(+), 53 deletions(-)
---
Merge resolution:
commit e5e36163483b2f2284e9feebf12c684be3f7d74c (HEAD -> refs/heads/test)
Merge: 577411b0a957 ca8313fd8339
Author: Borislav Petkov (AMD) <bp@alien8.de>
Date: Mon Dec 1 13:44:29 2025 +0100
Merge tag 'x86_microcode_for_v6.19_rc1' into test
- Add microcode staging support on Intel: it moves the loading of the
  microcode blobs themselves to a non-critical path so that microcode
  loading latencies are kept to a minimum. The actual "directing" of the
  hardware to load microcode is the only step which is done on the
  critical path. This scheme is also opportunistic in that, on a failure,
  the machinery falls back to normal loading
- Add the capability to the AMD side of the loader to select one of two
  per-family/model/stepping patches: one pre-Entrysign and the other
  post-Entrysign, with the goal of taking care of machines which
  haven't updated their BIOS yet - something they should absolutely do,
  as that is the only proper Entrysign fix
- Other small cleanups and fixlets
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Conflicts:
arch/x86/kernel/cpu/microcode/amd.c
diff --cc arch/x86/kernel/cpu/microcode/amd.c
index a881bf4c2011,8d3d1114881b..3821a985f4ff
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@@ -186,8 -186,50 +186,53 @@@ static u32 cpuid_to_ucode_rev(unsigned
return p.ucode_rev;
}
+ static u32 get_cutoff_revision(u32 rev)
+ {
+ switch (rev >> 8) {
+ case 0x80012: return 0x8001277; break;
+ case 0x80082: return 0x800820f; break;
+ case 0x83010: return 0x830107c; break;
+ case 0x86001: return 0x860010e; break;
+ case 0x86081: return 0x8608108; break;
+ case 0x87010: return 0x8701034; break;
+ case 0x8a000: return 0x8a0000a; break;
+ case 0xa0010: return 0xa00107a; break;
+ case 0xa0011: return 0xa0011da; break;
+ case 0xa0012: return 0xa001243; break;
+ case 0xa0082: return 0xa00820e; break;
+ case 0xa1011: return 0xa101153; break;
+ case 0xa1012: return 0xa10124e; break;
+ case 0xa1081: return 0xa108109; break;
+ case 0xa2010: return 0xa20102f; break;
+ case 0xa2012: return 0xa201212; break;
+ case 0xa4041: return 0xa404109; break;
+ case 0xa5000: return 0xa500013; break;
+ case 0xa6012: return 0xa60120a; break;
+ case 0xa7041: return 0xa704109; break;
+ case 0xa7052: return 0xa705208; break;
+ case 0xa7080: return 0xa708009; break;
+ case 0xa70c0: return 0xa70C009; break;
+ case 0xaa001: return 0xaa00116; break;
+ case 0xaa002: return 0xaa00218; break;
+ case 0xb0021: return 0xb002146; break;
++ case 0xb0081: return 0xb008111; break;
+ case 0xb1010: return 0xb101046; break;
+ case 0xb2040: return 0xb204031; break;
+ case 0xb4040: return 0xb404031; break;
++ case 0xb4041: return 0xb404101; break;
+ case 0xb6000: return 0xb600031; break;
++ case 0xb6080: return 0xb608031; break;
+ case 0xb7000: return 0xb700031; break;
+ default: break;
+
+ }
+ return 0;
+ }
+
static bool need_sha_check(u32 cur_rev)
{
+ u32 cutoff;
+
if (!cur_rev) {
cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
© 2016 - 2025 Red Hat, Inc.