Amend docs to reflect that the ucode= command/stanza depends on
MICROCODE_LOADING being set.
The new MICROCODE_OP() is a conditional setter for the microcode_ops
struct. By using IS_ENABLED() there rather than ifdef we allow DCE to
remove all statics no longer used when microcode loading is disabled
without tagging them with __maybe_unused.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
---
v3:
* Moved MICROCODE_SCAN_DEFAULT to a separate patch
* Added doc change to admin-guide
* Added Andrew's suggestion for Kconfig help text
* Inlined can_apply_microcode() macro in core.c
* Kept the platform-ops, gating them on CONFIG_MICROCODE_LOADING
through the new MICROCODE_OP() macro.
---
docs/admin-guide/microcode-loading.rst | 2 ++
docs/misc/efi.pandoc | 2 ++
docs/misc/xen-command-line.pandoc | 2 +-
xen/arch/x86/Kconfig | 14 ++++++++++++++
xen/arch/x86/cpu/microcode/amd.c | 16 +++++++++-------
xen/arch/x86/cpu/microcode/core.c | 15 ++++++++++++---
xen/arch/x86/cpu/microcode/intel.c | 11 +++++++----
xen/arch/x86/cpu/microcode/private.h | 2 ++
xen/arch/x86/efi/efi-boot.h | 3 ++-
xen/arch/x86/platform_hypercall.c | 22 +++++++++++++++-------
10 files changed, 66 insertions(+), 23 deletions(-)
diff --git a/docs/admin-guide/microcode-loading.rst b/docs/admin-guide/microcode-loading.rst
index a07e25802f..148bc8559b 100644
--- a/docs/admin-guide/microcode-loading.rst
+++ b/docs/admin-guide/microcode-loading.rst
@@ -23,6 +23,8 @@ TSX errata which necessitated disabling the feature entirely), or the addition
of brand new features (e.g. the Spectre v2 controls to work around speculative
execution vulnerabilities).
+Microcode loading support in Xen is controlled by the
+``CONFIG_MICROCODE_LOADING`` Kconfig option.
Boot time microcode loading
---------------------------
diff --git a/docs/misc/efi.pandoc b/docs/misc/efi.pandoc
index 11c1ac3346..c3fb1f3a30 100644
--- a/docs/misc/efi.pandoc
+++ b/docs/misc/efi.pandoc
@@ -104,6 +104,8 @@ Specifies an XSM module to load.
Specifies a CPU microcode blob to load. (x86 only)
+Only available when Xen is compiled with CONFIG_MICROCODE_LOADING.
+
###`dtb=<filename>`
Specifies a device tree file to load. The platform firmware may provide a
diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
index 15f7a315a4..866fa2f951 100644
--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -2748,7 +2748,7 @@ performance.
### ucode
> `= List of [ <integer> | scan=<bool>, nmi=<bool>, digest-check=<bool> ]`
- Applicability: x86
+ Applicability: x86 with CONFIG_MICROCODE_LOADING active
Default: `scan` is selectable via Kconfig, `nmi,digest-check`
Controls for CPU microcode loading. For early loading, this parameter can
diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index d5705e4bff..61f58aa829 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -331,8 +331,22 @@ config REQUIRE_NX
was unavailable. However, if enabled, Xen will no longer boot on
any CPU which is lacking NX support.
+config MICROCODE_LOADING
+ bool "Microcode loading"
+ default y
+ help
+ Microcode updates for CPUs fix errata and provide new functionality for
+ software to work around bugs, such as the speculative execution
+ vulnerabilities. It is common for OSes to carry updated microcode as
+ software tends to get updated more frequently than firmware.
+
+ For embedded environments where a full firmware/software stack is being
+ provided, it might not be necessary for Xen to load microcode
+ itself.
+
config MICROCODE_SCAN_DEFAULT
bool "Scan for microcode by default"
+ depends on MICROCODE_LOADING
help
During boot, Xen can scan the multiboot images for a CPIO archive
containing CPU microcode to be loaded, which is Linux's mechanism for
diff --git a/xen/arch/x86/cpu/microcode/amd.c b/xen/arch/x86/cpu/microcode/amd.c
index 71b041c84b..86706a21a6 100644
--- a/xen/arch/x86/cpu/microcode/amd.c
+++ b/xen/arch/x86/cpu/microcode/amd.c
@@ -561,11 +561,11 @@ static const char __initconst amd_cpio_path[] =
"kernel/x86/microcode/AuthenticAMD.bin";
static const struct microcode_ops __initconst_cf_clobber amd_ucode_ops = {
- .cpu_request_microcode = cpu_request_microcode,
+ .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
.collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
- .compare = amd_compare,
- .cpio_path = amd_cpio_path,
+ .apply_microcode = MICROCODE_OP(apply_microcode),
+ .compare = MICROCODE_OP(amd_compare),
+ .cpio_path = MICROCODE_OP(amd_cpio_path),
};
void __init ucode_probe_amd(struct microcode_ops *ops)
@@ -574,7 +574,8 @@ void __init ucode_probe_amd(struct microcode_ops *ops)
* The Entrysign vulnerability (SB-7033, CVE-2024-36347) affects Zen1-5
* CPUs. Taint Xen if digest checking is turned off.
*/
- if ( boot_cpu_data.family >= 0x17 && boot_cpu_data.family <= 0x1a &&
+ if ( IS_ENABLED(CONFIG_MICROCODE_LOADING) &&
+ boot_cpu_data.family >= 0x17 && boot_cpu_data.family <= 0x1a &&
!opt_digest_check )
{
printk(XENLOG_WARNING
@@ -614,8 +615,9 @@ void __init amd_check_entrysign(void)
unsigned int curr_rev;
uint8_t fixed_rev;
- if ( boot_cpu_data.vendor != X86_VENDOR_AMD ||
- boot_cpu_data.family < 0x17 ||
+ if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) ||
+ boot_cpu_data.vendor != X86_VENDOR_AMD ||
+ boot_cpu_data.family < 0x17 ||
boot_cpu_data.family > 0x1a )
return;
diff --git a/xen/arch/x86/cpu/microcode/core.c b/xen/arch/x86/cpu/microcode/core.c
index dabdb95b4c..efaf808f1a 100644
--- a/xen/arch/x86/cpu/microcode/core.c
+++ b/xen/arch/x86/cpu/microcode/core.c
@@ -58,7 +58,7 @@
*/
#define MICROCODE_UPDATE_TIMEOUT_US 1000000
-static bool __initdata ucode_mod_forced;
+static bool __initdata __maybe_unused ucode_mod_forced;
static unsigned int nr_cores;
/*
@@ -104,6 +104,7 @@ static int __initdata opt_mod_idx;
static bool __initdata opt_scan = IS_ENABLED(CONFIG_MICROCODE_SCAN_DEFAULT);
bool __ro_after_init opt_digest_check = true;
+#ifdef CONFIG_MICROCODE_LOADING
/*
* Used by the EFI path only, when xen.cfg identifies an explicit microcode
* file. Overrides ucode=<int>|scan on the regular command line.
@@ -162,6 +163,7 @@ static int __init cf_check parse_ucode(const char *s)
return rc;
}
custom_param("ucode", parse_ucode);
+#endif /* CONFIG_MICROCODE_LOADING */
static struct microcode_ops __ro_after_init ucode_ops;
@@ -469,7 +471,7 @@ struct ucode_buf {
char buffer[];
};
-static long cf_check ucode_update_hcall_cont(void *data)
+static long cf_check __maybe_unused ucode_update_hcall_cont(void *data)
{
struct microcode_patch *patch = NULL;
int ret, result;
@@ -613,6 +615,7 @@ static long cf_check ucode_update_hcall_cont(void *data)
return ret;
}
+#ifdef CONFIG_MICROCODE_LOADING
int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
unsigned long len, unsigned int flags)
{
@@ -645,6 +648,7 @@ int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
*/
return continue_hypercall_on_cpu(0, ucode_update_hcall_cont, buffer);
}
+#endif /* CONFIG_MICROCODE_LOADING */
/* Load a cached update to current cpu */
int microcode_update_one(void)
@@ -658,7 +662,7 @@ int microcode_update_one(void)
if ( ucode_ops.collect_cpu_info )
alternative_vcall(ucode_ops.collect_cpu_info);
- if ( !ucode_ops.apply_microcode )
+ if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) || !ucode_ops.apply_microcode )
return -EOPNOTSUPP;
spin_lock(µcode_mutex);
@@ -678,6 +682,7 @@ int microcode_update_one(void)
*/
static int __initdata early_mod_idx = -1;
+#ifdef CONFIG_MICROCODE_LOADING
static int __init cf_check microcode_init_cache(void)
{
struct boot_info *bi = &xen_boot_info;
@@ -734,6 +739,7 @@ static int __init cf_check microcode_init_cache(void)
return rc;
}
presmp_initcall(microcode_init_cache);
+#endif /* CONFIG_MICROCODE_LOADING */
/*
* There are several tasks:
@@ -898,6 +904,9 @@ int __init early_microcode_init(struct boot_info *bi)
printk(XENLOG_INFO "BSP microcode revision: 0x%08x\n", this_cpu(cpu_sig).rev);
+ if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) )
+ return -ENODEV;
+
/*
* Some hypervisors deliberately report a microcode revision of -1 to
* mean that they will not accept microcode updates.
diff --git a/xen/arch/x86/cpu/microcode/intel.c b/xen/arch/x86/cpu/microcode/intel.c
index 281993e725..ba99f4ffdc 100644
--- a/xen/arch/x86/cpu/microcode/intel.c
+++ b/xen/arch/x86/cpu/microcode/intel.c
@@ -408,17 +408,20 @@ static const char __initconst intel_cpio_path[] =
"kernel/x86/microcode/GenuineIntel.bin";
static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
- .cpu_request_microcode = cpu_request_microcode,
+ .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
.collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
- .compare = intel_compare,
- .cpio_path = intel_cpio_path,
+ .apply_microcode = MICROCODE_OP(apply_microcode),
+ .compare = MICROCODE_OP(intel_compare),
+ .cpio_path = MICROCODE_OP(intel_cpio_path),
};
void __init ucode_probe_intel(struct microcode_ops *ops)
{
*ops = intel_ucode_ops;
+ if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) )
+ return;
+
if ( !can_load_microcode() )
ops->apply_microcode = NULL;
}
diff --git a/xen/arch/x86/cpu/microcode/private.h b/xen/arch/x86/cpu/microcode/private.h
index e6c965dc99..1167b79db1 100644
--- a/xen/arch/x86/cpu/microcode/private.h
+++ b/xen/arch/x86/cpu/microcode/private.h
@@ -93,4 +93,6 @@ void ucode_probe_intel(struct microcode_ops *ops);
static inline void ucode_probe_intel(struct microcode_ops *ops) {}
#endif
+#define MICROCODE_OP(x) (IS_ENABLED(CONFIG_MICROCODE_LOADING) ? (x) : NULL)
+
#endif /* ASM_X86_MICROCODE_PRIVATE_H */
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index 0194720003..42a2c46b5e 100644
--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -295,7 +295,8 @@ static void __init efi_arch_cfg_file_late(const EFI_LOADED_IMAGE *image,
{
union string name;
- if ( read_section(image, L"ucode", &ucode, NULL) )
+ if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) ||
+ read_section(image, L"ucode", &ucode, NULL) )
return;
name.s = get_value(&cfg, section, "ucode");
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index 79bb99e0b6..a55060e662 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -309,22 +309,30 @@ ret_t do_platform_op(
case XENPF_microcode_update:
{
- XEN_GUEST_HANDLE(const_void) data;
+ ret = -EOPNOTSUPP;
+ if ( IS_ENABLED(CONFIG_MICROCODE_LOADING) )
+ {
+ XEN_GUEST_HANDLE(const_void) data;
- guest_from_compat_handle(data, op->u.microcode.data);
+ guest_from_compat_handle(data, op->u.microcode.data);
+ ret = ucode_update_hcall(data, op->u.microcode.length, 0);
+ }
- ret = ucode_update_hcall(data, op->u.microcode.length, 0);
break;
}
case XENPF_microcode_update2:
{
- XEN_GUEST_HANDLE(const_void) data;
+ ret = -EOPNOTSUPP;
+ if ( IS_ENABLED(CONFIG_MICROCODE_LOADING) )
+ {
+ XEN_GUEST_HANDLE(const_void) data;
- guest_from_compat_handle(data, op->u.microcode2.data);
+ guest_from_compat_handle(data, op->u.microcode2.data);
+ ret = ucode_update_hcall(data, op->u.microcode2.length,
+ op->u.microcode2.flags);
+ }
- ret = ucode_update_hcall(data, op->u.microcode2.length,
- op->u.microcode2.flags);
break;
}
--
2.43.0
On 13.01.2026 13:21, Alejandro Vallejo wrote:
> @@ -469,7 +471,7 @@ struct ucode_buf {
> char buffer[];
> };
>
> -static long cf_check ucode_update_hcall_cont(void *data)
> +static long cf_check __maybe_unused ucode_update_hcall_cont(void *data)
> {
> struct microcode_patch *patch = NULL;
> int ret, result;
Why this change when ...
> @@ -613,6 +615,7 @@ static long cf_check ucode_update_hcall_cont(void *data)
> return ret;
> }
>
> +#ifdef CONFIG_MICROCODE_LOADING
... this can simply be moved up accordingly? After all ...
> int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
> unsigned long len, unsigned int flags)
> {
> @@ -645,6 +648,7 @@ int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
> */
> return continue_hypercall_on_cpu(0, ucode_update_hcall_cont, buffer);
... this is the only user of that other function.
> --- a/xen/arch/x86/cpu/microcode/intel.c
> +++ b/xen/arch/x86/cpu/microcode/intel.c
> @@ -408,17 +408,20 @@ static const char __initconst intel_cpio_path[] =
> "kernel/x86/microcode/GenuineIntel.bin";
>
> static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
> - .cpu_request_microcode = cpu_request_microcode,
> + .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
> .collect_cpu_info = collect_cpu_info,
> - .apply_microcode = apply_microcode,
> - .compare = intel_compare,
> - .cpio_path = intel_cpio_path,
> + .apply_microcode = MICROCODE_OP(apply_microcode),
> + .compare = MICROCODE_OP(intel_compare),
> + .cpio_path = MICROCODE_OP(intel_cpio_path),
> };
While I appreciate the intention with MICROCODE_OP(), I'm not really happy
with function pointer members left in place just for them to be NULL
everywhere. What if a call site remains unguarded? With PV guests that
would be a privilege escalation XSA.
Jan
On Tue Jan 13, 2026 at 4:27 PM CET, Jan Beulich wrote:
> On 13.01.2026 13:21, Alejandro Vallejo wrote:
>> @@ -469,7 +471,7 @@ struct ucode_buf {
>> char buffer[];
>> };
>>
>> -static long cf_check ucode_update_hcall_cont(void *data)
>> +static long cf_check __maybe_unused ucode_update_hcall_cont(void *data)
>> {
>> struct microcode_patch *patch = NULL;
>> int ret, result;
>
> Why this change when ...
>
>> @@ -613,6 +615,7 @@ static long cf_check ucode_update_hcall_cont(void *data)
>> return ret;
>> }
>>
>> +#ifdef CONFIG_MICROCODE_LOADING
>
> ... this can simply be moved up accordingly? After all ...
>
>> int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
>> unsigned long len, unsigned int flags)
>> {
>> @@ -645,6 +648,7 @@ int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
>> */
>> return continue_hypercall_on_cpu(0, ucode_update_hcall_cont, buffer);
>
> ... this is the only user of that other function.
To minimise the scope of the ifdef. It's hard to know where things start/end
when they cover several functions. This way it's (imo) clearer.
I don't mind much though.
>
>> --- a/xen/arch/x86/cpu/microcode/intel.c
>> +++ b/xen/arch/x86/cpu/microcode/intel.c
>> @@ -408,17 +408,20 @@ static const char __initconst intel_cpio_path[] =
>> "kernel/x86/microcode/GenuineIntel.bin";
>>
>> static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
>> - .cpu_request_microcode = cpu_request_microcode,
>> + .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
>> .collect_cpu_info = collect_cpu_info,
>> - .apply_microcode = apply_microcode,
>> - .compare = intel_compare,
>> - .cpio_path = intel_cpio_path,
>> + .apply_microcode = MICROCODE_OP(apply_microcode),
>> + .compare = MICROCODE_OP(intel_compare),
>> + .cpio_path = MICROCODE_OP(intel_cpio_path),
>> };
>
> While I appreciate the intention with MICROCODE_OP(), I'm not really happy
> with function pointer members left in place just for them to be NULL
> everywhere. What if a call site remains unguarded? With PV guests that
> would be a privilege escalation XSA.
I see where you're coming from, but these are already NULL if microcode
loading is not exposed by the underlying hypervisor (if any), or is blocked by
hardware in Intel, so arguably that worry is orthogonal to this.
Also, only a privileged domain can perform late microcode loading, so the XSM
check would prevent any such XSA from coming to pass. dom0 crashing the system
on a bad hypercall (while wrong) would just be unfortunate, not a security
issue, as far as I can tell.
I could indeed get rid of it all. But that'd cause a sea of ifdefs anywhere the
fields are accessed, which is contrary to the current intent of relying on DCE
for readability.
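For illustration, a minimal self-contained sketch of that DCE pattern (made-up
names and a simplified IS_ENABLED() stand-in, not Xen code):

    #include <stddef.h>

    #define CONFIG_FEATURE 1              /* pretend Kconfig output */
    #define IS_ENABLED(opt) (opt)         /* stand-in for the real macro */

    /* Conditional setter: folds to NULL at compile time when disabled. */
    #define FEATURE_OP(fn) (IS_ENABLED(CONFIG_FEATURE) ? (fn) : NULL)

    struct ops {
        int (*do_thing)(void);
    };

    /* The reference below stays visible to the compiler even when
     * CONFIG_FEATURE is 0, so no #ifdef (and no __maybe_unused) is
     * needed: the initializer folds to NULL and DCE drops do_thing(). */
    static int do_thing(void)
    {
        return 0;
    }

    static const struct ops example_ops = {
        .do_thing = FEATURE_OP(do_thing),
    };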
Cheers,
Alejandro
On 13.01.2026 17:12, Alejandro Vallejo wrote:
> On Tue Jan 13, 2026 at 4:27 PM CET, Jan Beulich wrote:
>> On 13.01.2026 13:21, Alejandro Vallejo wrote:
>>> @@ -469,7 +471,7 @@ struct ucode_buf {
>>> char buffer[];
>>> };
>>>
>>> -static long cf_check ucode_update_hcall_cont(void *data)
>>> +static long cf_check __maybe_unused ucode_update_hcall_cont(void *data)
>>> {
>>> struct microcode_patch *patch = NULL;
>>> int ret, result;
>>
>> Why this change when ...
>>
>>> @@ -613,6 +615,7 @@ static long cf_check ucode_update_hcall_cont(void *data)
>>> return ret;
>>> }
>>>
>>> +#ifdef CONFIG_MICROCODE_LOADING
>>
>> ... this can simply be moved up accordingly? After all ...
>>
>>> int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
>>> unsigned long len, unsigned int flags)
>>> {
>>> @@ -645,6 +648,7 @@ int ucode_update_hcall(XEN_GUEST_HANDLE(const_void) buf,
>>> */
>>> return continue_hypercall_on_cpu(0, ucode_update_hcall_cont, buffer);
>>
>> ... this is the only user of that other function.
>
> To minimise the scope of the ifdef. It's hard to know where things start/end
> when they cover several functions. This way it's (imo) clearer.
>
> I don't mind much though.
>
>>
>>> --- a/xen/arch/x86/cpu/microcode/intel.c
>>> +++ b/xen/arch/x86/cpu/microcode/intel.c
>>> @@ -408,17 +408,20 @@ static const char __initconst intel_cpio_path[] =
>>> "kernel/x86/microcode/GenuineIntel.bin";
>>>
>>> static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
>>> - .cpu_request_microcode = cpu_request_microcode,
>>> + .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
>>> .collect_cpu_info = collect_cpu_info,
>>> - .apply_microcode = apply_microcode,
>>> - .compare = intel_compare,
>>> - .cpio_path = intel_cpio_path,
>>> + .apply_microcode = MICROCODE_OP(apply_microcode),
>>> + .compare = MICROCODE_OP(intel_compare),
>>> + .cpio_path = MICROCODE_OP(intel_cpio_path),
>>> };
>>
>> While I appreciate the intention with MICROCODE_OP(), I'm not really happy
>> with function pointer members left in place just for them to be NULL
>> everywhere. What if a call site remains unguarded? With PV guests that
>> would be a privilege escalation XSA.
>
> I see where you're coming from, but these are already NULL if microcode
> loading is not exposed by the underlying hypervisor (if any), or is blocked by
> hardware in Intel, so arguably that worry is orthogonal to this.
Yes and no. Paths taken differ between what we have now and what we will have
when your work has gone in.
> Also, only a privileged domain can perform late microcode loading, so the XSM
> check would prevent any such XSA from coming to pass. dom0 crashing the system
> on a bad hypercall (while wrong) would just be unfortunate, not a security
> issue, as far as I can tell.
Okay, together with Andrew's response it wouldn't be calls through NULL, so
perhaps indeed not an XSA. The hypercall being Dom0-only I am, however, less
convinced would necessarily matter here. We interact with remote CPUs, after
all, and hence having one which happens to run a PV DomU call through NULL
would still be in need of an XSA.
Jan
On 13/01/2026 4:12 pm, Alejandro Vallejo wrote:
>>> --- a/xen/arch/x86/cpu/microcode/intel.c
>>> +++ b/xen/arch/x86/cpu/microcode/intel.c
>>> @@ -408,17 +408,20 @@ static const char __initconst intel_cpio_path[] =
>>> "kernel/x86/microcode/GenuineIntel.bin";
>>>
>>> static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
>>> - .cpu_request_microcode = cpu_request_microcode,
>>> + .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
>>> .collect_cpu_info = collect_cpu_info,
>>> - .apply_microcode = apply_microcode,
>>> - .compare = intel_compare,
>>> - .cpio_path = intel_cpio_path,
>>> + .apply_microcode = MICROCODE_OP(apply_microcode),
>>> + .compare = MICROCODE_OP(intel_compare),
>>> + .cpio_path = MICROCODE_OP(intel_cpio_path),
>>> };
>> While I appreciate the intention with MICROCODE_OP(), I'm not really happy
>> with function pointer members left in place just for them to be NULL
>> everywhere. What if a call site remains unguarded? With PV guests that
>> would be a privilege escalation XSA.
> I see where you're coming from, but these are already NULL if microcode
> loading is not exposed by the underlying hypervisor (if any), or is blocked by
> hardware in Intel, so arguably that worry is orthogonal to this.
Also because they're cf_clobber, the calls are turned into UDs. We
won't follow a function pointer to 0.
~Andrew
On 13.01.2026 17:47, Andrew Cooper wrote:
> On 13/01/2026 4:12 pm, Alejandro Vallejo wrote:
>>>> --- a/xen/arch/x86/cpu/microcode/intel.c
>>>> +++ b/xen/arch/x86/cpu/microcode/intel.c
>>>> @@ -408,17 +408,20 @@ static const char __initconst intel_cpio_path[] =
>>>> "kernel/x86/microcode/GenuineIntel.bin";
>>>>
>>>> static const struct microcode_ops __initconst_cf_clobber intel_ucode_ops = {
>>>> - .cpu_request_microcode = cpu_request_microcode,
>>>> + .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
>>>> .collect_cpu_info = collect_cpu_info,
>>>> - .apply_microcode = apply_microcode,
>>>> - .compare = intel_compare,
>>>> - .cpio_path = intel_cpio_path,
>>>> + .apply_microcode = MICROCODE_OP(apply_microcode),
>>>> + .compare = MICROCODE_OP(intel_compare),
>>>> + .cpio_path = MICROCODE_OP(intel_cpio_path),
>>>> };
>>> While I appreciate the intention with MICROCODE_OP(), I'm not really happy
>>> with function pointer members left in place just for them to be NULL
>>> everywhere. What if a call site remains unguarded? With PV guests that
>>> would be a privilege escalation XSA.
>> I see where you're coming from, but these are already NULL if microcode
>> loading is not exposed by the underlying hypervisor (if any), or is blocked by
>> hardware in Intel, so arguably that worry is orthogonal to this.
>
> Also because they're cf_clobber, the calls are turned into UDs. We
> won't follow a function pointer to 0.
Hmm, yes, the alternative patching will guarantee that. That hasn't got
anything to do with cf_clobber though, I don't think.
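For reference, the call sites the series relies on are guarded before any call
through the hook; a simplified sketch, modelled on microcode_update_one() in
the patch above (details elided):

    if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) || !ucode_ops.apply_microcode )
        return -EOPNOTSUPP;

    /* ... only then is the call through ucode_ops.apply_microcode reached. */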
Jan
On 13/01/2026 12:21 pm, Alejandro Vallejo wrote:
> Amend docs to reflect ucode= command/stanza depend on MICROCODE_LOADING
> being set.
>
> The new MICROCODE_OP() is a conditional setter for the microcode_ops
> struct. By using IS_ENABLED() there ratehr than ifdef we allow DCE to
rather
> remove all statics no longer used when microcode loading is disabled
> without tagging them with __maybe_unused.
>
> Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com> with a couple
more minor points, all of which I can fix on commit.
AFAICT, this can be reordered with the earlycpio patch 3 ?
> diff --git a/docs/misc/efi.pandoc b/docs/misc/efi.pandoc
> index 11c1ac3346..c3fb1f3a30 100644
> --- a/docs/misc/efi.pandoc
> +++ b/docs/misc/efi.pandoc
> @@ -104,6 +104,8 @@ Specifies an XSM module to load.
>
> Specifies a CPU microcode blob to load. (x86 only)
>
> +Only available when Xen is compiled with CONFIG_MICROCODE_LOADING.
enabled.
> +
> ###`dtb=<filename>`
>
> Specifies a device tree file to load. The platform firmware may provide a
> diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
> index 15f7a315a4..866fa2f951 100644
> --- a/docs/misc/xen-command-line.pandoc
> +++ b/docs/misc/xen-command-line.pandoc
> @@ -2748,7 +2748,7 @@ performance.
> ### ucode
> > `= List of [ <integer> | scan=<bool>, nmi=<bool>, digest-check=<bool> ]`
>
> - Applicability: x86
> + Applicability: x86 with CONFIG_MICROCODE_LOADING active
> Default: `scan` is selectable via Kconfig, `nmi,digest-check`
This isn't actually how we provide such information. It's usually a
sentence in the first paragraph.
diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
index 50d7edb2488e..5b6ff94f441a 100644
--- a/docs/misc/xen-command-line.pandoc
+++ b/docs/misc/xen-command-line.pandoc
@@ -2751,9 +2751,10 @@ performance.
Applicability: x86
Default: `scan` is selectable via Kconfig, `nmi,digest-check`
-Controls for CPU microcode loading. For early loading, this parameter can
-specify how and where to find the microcode update blob. For late loading,
-this parameter specifies if the update happens within a NMI handler.
+Controls for CPU microcode loading, available when `CONFIG_MICROCODE_LOADING`
+is enabled. For early loading, this parameter can specify how and where to
+find the microcode update blob. For late loading, this parameter specifies if
+the update happens within a NMI handler.
'integer' specifies the CPU microcode update blob module index. When positive,
this specifies the n-th module (in the GrUB entry, zero based) to be used
which is the re-wrapped version of inserting ', available when
`CONFIG_MICROCODE_LOADING` is enabled'.
> diff --git a/xen/arch/x86/cpu/microcode/amd.c b/xen/arch/x86/cpu/microcode/amd.c
> index 71b041c84b..86706a21a6 100644
> --- a/xen/arch/x86/cpu/microcode/amd.c
> +++ b/xen/arch/x86/cpu/microcode/amd.c
> @@ -561,11 +561,11 @@ static const char __initconst amd_cpio_path[] =
> "kernel/x86/microcode/AuthenticAMD.bin";
>
> static const struct microcode_ops __initconst_cf_clobber amd_ucode_ops = {
> - .cpu_request_microcode = cpu_request_microcode,
> + .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
> .collect_cpu_info = collect_cpu_info,
> - .apply_microcode = apply_microcode,
> - .compare = amd_compare,
> - .cpio_path = amd_cpio_path,
> + .apply_microcode = MICROCODE_OP(apply_microcode),
> + .compare = MICROCODE_OP(amd_compare),
> + .cpio_path = MICROCODE_OP(amd_cpio_path),
> };
Reordering cpu_request_microcode and collect_cpu_info like before makes
this easier to read.
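Presumably something like the following (sketch only, reusing the fields from
the hunk above, with the unconditional hook first and the MICROCODE_OP() ones
grouped):

    static const struct microcode_ops __initconst_cf_clobber amd_ucode_ops = {
        .collect_cpu_info = collect_cpu_info,
        .cpu_request_microcode = MICROCODE_OP(cpu_request_microcode),
        .apply_microcode = MICROCODE_OP(apply_microcode),
        .compare = MICROCODE_OP(amd_compare),
        .cpio_path = MICROCODE_OP(amd_cpio_path),
    };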
> diff --git a/xen/arch/x86/cpu/microcode/private.h b/xen/arch/x86/cpu/microcode/private.h
> index e6c965dc99..1167b79db1 100644
> --- a/xen/arch/x86/cpu/microcode/private.h
> +++ b/xen/arch/x86/cpu/microcode/private.h
> @@ -93,4 +93,6 @@ void ucode_probe_intel(struct microcode_ops *ops);
> static inline void ucode_probe_intel(struct microcode_ops *ops) {}
> #endif
>
> +#define MICROCODE_OP(x) (IS_ENABLED(CONFIG_MICROCODE_LOADING) ? (x) : NULL)
> +
This wants /* Aids dead-code elimination of the static hooks */
and wants to be up beside struct microcode_ops.
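I.e. something along these lines (sketch of the suggestion only, combining the
proposed comment with the macro from the patch):

    /* Aids dead-code elimination of the static hooks */
    #define MICROCODE_OP(x) (IS_ENABLED(CONFIG_MICROCODE_LOADING) ? (x) : NULL)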
> diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
> index 79bb99e0b6..a55060e662 100644
> --- a/xen/arch/x86/platform_hypercall.c
> +++ b/xen/arch/x86/platform_hypercall.c
> @@ -309,22 +309,30 @@ ret_t do_platform_op(
>
> case XENPF_microcode_update:
> {
> - XEN_GUEST_HANDLE(const_void) data;
> + ret = -EOPNOTSUPP;
> + if ( IS_ENABLED(CONFIG_MICROCODE_LOADING) )
> + {
> + XEN_GUEST_HANDLE(const_void) data;
>
> - guest_from_compat_handle(data, op->u.microcode.data);
> + guest_from_compat_handle(data, op->u.microcode.data);
> + ret = ucode_update_hcall(data, op->u.microcode.length, 0);
> + }
>
> - ret = ucode_update_hcall(data, op->u.microcode.length, 0);
> break;
> }
>
> case XENPF_microcode_update2:
> {
> - XEN_GUEST_HANDLE(const_void) data;
> + ret = -EOPNOTSUPP;
> + if ( IS_ENABLED(CONFIG_MICROCODE_LOADING) )
> + {
> + XEN_GUEST_HANDLE(const_void) data;
>
> - guest_from_compat_handle(data, op->u.microcode2.data);
> + guest_from_compat_handle(data, op->u.microcode2.data);
> + ret = ucode_update_hcall(data, op->u.microcode2.length,
> + op->u.microcode2.flags);
> + }
>
> - ret = ucode_update_hcall(data, op->u.microcode2.length,
> - op->u.microcode2.flags);
> break;
> }
>
Both these hunks will be smaller as:
    ret = -EOPNOTSUPP;
    if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) )
        break;
and that's the preferred style too.
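As a rough, untested sketch of how the first hunk would then read:

    case XENPF_microcode_update:
    {
        XEN_GUEST_HANDLE(const_void) data;

        ret = -EOPNOTSUPP;
        if ( !IS_ENABLED(CONFIG_MICROCODE_LOADING) )
            break;

        guest_from_compat_handle(data, op->u.microcode.data);
        ret = ucode_update_hcall(data, op->u.microcode.length, 0);
        break;
    }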
Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com> and I
can fix all of these on commit.
AFAICT, this can be reordered with the earlycpio patch 3 ?
~Andrew