MSR_ARCH_CAPS data is now included in featureset information. Replace
opencoded checks with regular feature ones.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
---
xen/arch/x86/include/asm/cpufeature.h | 7 ++++
xen/arch/x86/spec_ctrl.c | 56 +++++++++++++--------------
2 files changed, 33 insertions(+), 30 deletions(-)
diff --git a/xen/arch/x86/include/asm/cpufeature.h b/xen/arch/x86/include/asm/cpufeature.h
index deca5bfc2629..00a43123ac82 100644
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -184,8 +184,15 @@ static inline bool boot_cpu_has(unsigned int feat)
#define cpu_has_avx_ne_convert boot_cpu_has(X86_FEATURE_AVX_NE_CONVERT)
/* MSR_ARCH_CAPS 10A */
+#define cpu_has_rdcl_no boot_cpu_has(X86_FEATURE_RDCL_NO)
+#define cpu_has_eibrs boot_cpu_has(X86_FEATURE_EIBRS)
+#define cpu_has_rsba boot_cpu_has(X86_FEATURE_RSBA)
+#define cpu_has_skip_l1dfl boot_cpu_has(X86_FEATURE_SKIP_L1DFL)
+#define cpu_has_mds_no boot_cpu_has(X86_FEATURE_MDS_NO)
#define cpu_has_if_pschange_mc_no boot_cpu_has(X86_FEATURE_IF_PSCHANGE_MC_NO)
#define cpu_has_tsx_ctrl boot_cpu_has(X86_FEATURE_TSX_CTRL)
+#define cpu_has_taa_no boot_cpu_has(X86_FEATURE_TAA_NO)
+#define cpu_has_fb_clear boot_cpu_has(X86_FEATURE_FB_CLEAR)
/* Synthesized. */
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index f81db2143328..50d467f74cf8 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -282,12 +282,10 @@ custom_param("spec-ctrl", parse_spec_ctrl);
int8_t __read_mostly opt_xpti_hwdom = -1;
int8_t __read_mostly opt_xpti_domu = -1;
-static __init void xpti_init_default(uint64_t caps)
+static __init void xpti_init_default(void)
{
- if ( boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
- caps = ARCH_CAPS_RDCL_NO;
-
- if ( caps & ARCH_CAPS_RDCL_NO )
+ if ( (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
+ cpu_has_rdcl_no )
{
if ( opt_xpti_hwdom < 0 )
opt_xpti_hwdom = 0;
@@ -390,9 +388,10 @@ static int __init cf_check parse_pv_l1tf(const char *s)
}
custom_param("pv-l1tf", parse_pv_l1tf);
-static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+static void __init print_details(enum ind_thunk thunk)
{
unsigned int _7d0 = 0, _7d2 = 0, e8b = 0, max = 0, tmp;
+ uint64_t caps = 0;
/* Collect diagnostics about available mitigations. */
if ( boot_cpu_data.cpuid_level >= 7 )
@@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2);
if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 )
cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp);
+ if ( cpu_has_arch_caps )
+ rdmsrl(MSR_ARCH_CAPABILITIES, caps);
printk("Speculative mitigation facilities:\n");
@@ -578,7 +579,7 @@ static bool __init check_smt_enabled(void)
}
/* Calculate whether Retpoline is known-safe on this CPU. */
-static bool __init retpoline_safe(uint64_t caps)
+static bool __init retpoline_safe(void)
{
unsigned int ucode_rev = this_cpu(cpu_sig).rev;
@@ -596,7 +597,7 @@ static bool __init retpoline_safe(uint64_t caps)
* Processors offering Enhanced IBRS are not guarenteed to be
* repoline-safe.
*/
- if ( caps & (ARCH_CAPS_RSBA | ARCH_CAPS_IBRS_ALL) )
+ if ( cpu_has_rsba || cpu_has_eibrs )
return false;
switch ( boot_cpu_data.x86_model )
@@ -845,7 +846,7 @@ static void __init ibpb_calculations(void)
}
/* Calculate whether this CPU is vulnerable to L1TF. */
-static __init void l1tf_calculations(uint64_t caps)
+static __init void l1tf_calculations(void)
{
bool hit_default = false;
@@ -933,7 +934,7 @@ static __init void l1tf_calculations(uint64_t caps)
}
/* Any processor advertising RDCL_NO should be not vulnerable to L1TF. */
- if ( caps & ARCH_CAPS_RDCL_NO )
+ if ( cpu_has_rdcl_no )
cpu_has_bug_l1tf = false;
if ( cpu_has_bug_l1tf && hit_default )
@@ -992,7 +993,7 @@ static __init void l1tf_calculations(uint64_t caps)
}
/* Calculate whether this CPU is vulnerable to MDS. */
-static __init void mds_calculations(uint64_t caps)
+static __init void mds_calculations(void)
{
/* MDS is only known to affect Intel Family 6 processors at this time. */
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
@@ -1000,7 +1001,7 @@ static __init void mds_calculations(uint64_t caps)
return;
/* Any processor advertising MDS_NO should be not vulnerable to MDS. */
- if ( caps & ARCH_CAPS_MDS_NO )
+ if ( cpu_has_mds_no )
return;
switch ( boot_cpu_data.x86_model )
@@ -1113,10 +1114,6 @@ void __init init_speculation_mitigations(void)
enum ind_thunk thunk = THUNK_DEFAULT;
bool has_spec_ctrl, ibrs = false, hw_smt_enabled;
bool cpu_has_bug_taa;
- uint64_t caps = 0;
-
- if ( cpu_has_arch_caps )
- rdmsrl(MSR_ARCH_CAPABILITIES, caps);
hw_smt_enabled = check_smt_enabled();
@@ -1163,7 +1160,7 @@ void __init init_speculation_mitigations(void)
* On all hardware, we'd like to use retpoline in preference to
* IBRS, but only if it is safe on this hardware.
*/
- if ( retpoline_safe(caps) )
+ if ( retpoline_safe() )
thunk = THUNK_RETPOLINE;
else if ( has_spec_ctrl )
ibrs = true;
@@ -1392,13 +1389,13 @@ void __init init_speculation_mitigations(void)
* threads. Activate this if SMT is enabled, and Xen is using a non-zero
* MSR_SPEC_CTRL setting.
*/
- if ( boot_cpu_has(X86_FEATURE_IBRSB) && !(caps & ARCH_CAPS_IBRS_ALL) &&
+ if ( boot_cpu_has(X86_FEATURE_IBRSB) && !cpu_has_eibrs &&
hw_smt_enabled && default_xen_spec_ctrl )
setup_force_cpu_cap(X86_FEATURE_SC_MSR_IDLE);
- xpti_init_default(caps);
+ xpti_init_default();
- l1tf_calculations(caps);
+ l1tf_calculations();
/*
* By default, enable PV domU L1TF mitigations on all L1TF-vulnerable
@@ -1419,7 +1416,7 @@ void __init init_speculation_mitigations(void)
if ( !boot_cpu_has(X86_FEATURE_L1D_FLUSH) )
opt_l1d_flush = 0;
else if ( opt_l1d_flush == -1 )
- opt_l1d_flush = cpu_has_bug_l1tf && !(caps & ARCH_CAPS_SKIP_L1DFL);
+ opt_l1d_flush = cpu_has_bug_l1tf && !cpu_has_skip_l1dfl;
/* We compile lfence's in by default, and nop them out if requested. */
if ( !opt_branch_harden )
@@ -1442,7 +1439,7 @@ void __init init_speculation_mitigations(void)
"enabled. Please assess your configuration and choose an\n"
"explicit 'smt=<bool>' setting. See XSA-273.\n");
- mds_calculations(caps);
+ mds_calculations();
/*
* Parts which enumerate FB_CLEAR are those which are post-MDS_NO and have
@@ -1454,7 +1451,7 @@ void __init init_speculation_mitigations(void)
* the return-to-guest path.
*/
if ( opt_unpriv_mmio )
- opt_fb_clear_mmio = caps & ARCH_CAPS_FB_CLEAR;
+ opt_fb_clear_mmio = cpu_has_fb_clear;
/*
* By default, enable PV and HVM mitigations on MDS-vulnerable hardware.
@@ -1484,7 +1481,7 @@ void __init init_speculation_mitigations(void)
*/
if ( opt_md_clear_pv || opt_md_clear_hvm || opt_fb_clear_mmio )
setup_force_cpu_cap(X86_FEATURE_SC_VERW_IDLE);
- opt_md_clear_hvm &= !(caps & ARCH_CAPS_SKIP_L1DFL) && !opt_l1d_flush;
+ opt_md_clear_hvm &= !cpu_has_skip_l1dfl && !opt_l1d_flush;
/*
* Warn the user if they are on MLPDS/MFBDS-vulnerable hardware with HT
@@ -1515,8 +1512,7 @@ void __init init_speculation_mitigations(void)
* we check both to spot TSX in a microcode/cmdline independent way.
*/
cpu_has_bug_taa =
- (cpu_has_rtm || (caps & ARCH_CAPS_TSX_CTRL)) &&
- (caps & (ARCH_CAPS_MDS_NO | ARCH_CAPS_TAA_NO)) == ARCH_CAPS_MDS_NO;
+ (cpu_has_rtm || cpu_has_tsx_ctrl) && cpu_has_mds_no && !cpu_has_taa_no;
/*
* On TAA-affected hardware, disabling TSX is the preferred mitigation, vs
@@ -1535,7 +1531,7 @@ void __init init_speculation_mitigations(void)
* plausibly value TSX higher than Hyperthreading...), disable TSX to
* mitigate TAA.
*/
- if ( opt_tsx == -1 && cpu_has_bug_taa && (caps & ARCH_CAPS_TSX_CTRL) &&
+ if ( opt_tsx == -1 && cpu_has_bug_taa && cpu_has_tsx_ctrl &&
((hw_smt_enabled && opt_smt) ||
!boot_cpu_has(X86_FEATURE_SC_VERW_IDLE)) )
{
@@ -1560,15 +1556,15 @@ void __init init_speculation_mitigations(void)
if ( cpu_has_srbds_ctrl )
{
if ( opt_srb_lock == -1 && !opt_unpriv_mmio &&
- (caps & (ARCH_CAPS_MDS_NO|ARCH_CAPS_TAA_NO)) == ARCH_CAPS_MDS_NO &&
- (!cpu_has_hle || ((caps & ARCH_CAPS_TSX_CTRL) && rtm_disabled)) )
+ cpu_has_mds_no && !cpu_has_taa_no &&
+ (!cpu_has_hle || (cpu_has_tsx_ctrl && rtm_disabled)) )
opt_srb_lock = 0;
set_in_mcu_opt_ctrl(MCU_OPT_CTRL_RNGDS_MITG_DIS,
opt_srb_lock ? 0 : MCU_OPT_CTRL_RNGDS_MITG_DIS);
}
- print_details(thunk, caps);
+ print_details(thunk);
/*
* If MSR_SPEC_CTRL is available, apply Xen's default setting and discard
--
2.30.2
On 16.05.2023 16:53, Andrew Cooper wrote: > @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) > cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2); > if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 ) > cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp); > + if ( cpu_has_arch_caps ) > + rdmsrl(MSR_ARCH_CAPABILITIES, caps); Why do you read the MSR again? I would have expected this to come out of raw_cpu_policy now (and incrementally the CPUID pieces as well, later on). Apart from this, with all the uses further down gone, perhaps there's not even a need for the raw value, if you used the bitfields in the printk(). Which in turn raises the question whether the #define-s in msr-index.h are of much use then anymore. Jan
On 17/05/2023 3:47 pm, Jan Beulich wrote: > On 16.05.2023 16:53, Andrew Cooper wrote: >> @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) >> cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2); >> if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 ) >> cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp); >> + if ( cpu_has_arch_caps ) >> + rdmsrl(MSR_ARCH_CAPABILITIES, caps); > Why do you read the MSR again? I would have expected this to come out > of raw_cpu_policy now (and incrementally the CPUID pieces as well, > later on). Consistency with the surrounding logic. Also because the raw and host policies don't get sorted until much later in boot. > Apart from this, with all the uses further down gone, perhaps there's > not even a need for the raw value, if you used the bitfields in the > printk(). Which in turn raises the question whether the #define-s in > msr-index.h are of much use then anymore. One of the next phases of work is synthesizing these in the host policy for CPUs which didn't receive microcode updates (for whatever reason). There is a valid discussion for whether we ought to render the raw or host info here (currently we do raw), but I'm not adjusting that in this patch. ~Andrew
On 17.05.2023 18:35, Andrew Cooper wrote:
> On 17/05/2023 3:47 pm, Jan Beulich wrote:
>> On 16.05.2023 16:53, Andrew Cooper wrote:
>>> @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
>>> cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2);
>>> if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 )
>>> cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp);
>>> + if ( cpu_has_arch_caps )
>>> + rdmsrl(MSR_ARCH_CAPABILITIES, caps);
>> Why do you read the MSR again? I would have expected this to come out
>> of raw_cpu_policy now (and incrementally the CPUID pieces as well,
>> later on).
>
> Consistency with the surrounding logic.
I view this as relevant only when the code invoking CPUID directly is
intended to stay.
> Also because the raw and host policies don't get sorted until much later
> in boot.
identify_cpu(), which invokes init_host_cpu_policies(), is called
ahead of init_speculation_mitigations(), isn't it?
>> Apart from this, with all the uses further down gone, perhaps there's
>> not even a need for the raw value, if you used the bitfields in the
>> printk(). Which in turn raises the question whether the #define-s in
>> msr-index.h are of much use then anymore.
>
> One of the next phases of work is synthesizing these in the host policy
> for CPUs which didn't receive microcode updates (for whatever reason).
>
> There is a valid discussion for whether we ought to render the raw or
> host info here (currently we do raw), but I'm not adjusting that in this
> patch.
In the end I think both have their merits to log. So far it was my
assumption that "Hardware {hints,features}:" was intended to cover
raw, while "Xen settings:" was meaning to be close to "host" (but of
course there's quite a bit of a delta).
Jan
On 19/05/2023 7:00 am, Jan Beulich wrote: > On 17.05.2023 18:35, Andrew Cooper wrote: >> On 17/05/2023 3:47 pm, Jan Beulich wrote: >>> On 16.05.2023 16:53, Andrew Cooper wrote: >>>> @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) >>>> cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2); >>>> if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 ) >>>> cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp); >>>> + if ( cpu_has_arch_caps ) >>>> + rdmsrl(MSR_ARCH_CAPABILITIES, caps); >>> Why do you read the MSR again? I would have expected this to come out >>> of raw_cpu_policy now (and incrementally the CPUID pieces as well, >>> later on). >> Consistency with the surrounding logic. > I view this as relevant only when the code invoking CPUID directly is > intended to stay. Quite the contrary. It stays because this patch, and changing the semantics of the print block are unrelated things and should not be mixed together. >> Also because the raw and host policies don't get sorted until much later >> in boot. > identify_cpu(), which invokes init_host_cpu_policies(), is called > ahead of init_speculation_mitigations(), isn't it? What is init_host_cpu_policies() ? I have a plan for what it's going to be if prior MSR work hadn't ground to a halt, but it's a bit too late for that now. (To answer the question properly, no the policies aren't set up until just before building dom0, and that's not something that is trivial to change.) ~Andrew
On 19.05.2023 16:38, Andrew Cooper wrote: > On 19/05/2023 7:00 am, Jan Beulich wrote: >> On 17.05.2023 18:35, Andrew Cooper wrote: >>> On 17/05/2023 3:47 pm, Jan Beulich wrote: >>>> On 16.05.2023 16:53, Andrew Cooper wrote: >>>>> @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) >>>>> cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2); >>>>> if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 ) >>>>> cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp); >>>>> + if ( cpu_has_arch_caps ) >>>>> + rdmsrl(MSR_ARCH_CAPABILITIES, caps); >>>> Why do you read the MSR again? I would have expected this to come out >>>> of raw_cpu_policy now (and incrementally the CPUID pieces as well, >>>> later on). >>> Consistency with the surrounding logic. >> I view this as relevant only when the code invoking CPUID directly is >> intended to stay. > Quite the contrary. It stays because this patch, and changing the > semantics of the print block are unrelated things and should not be > mixed together. Hmm. On one hand I can see your point, yet otoh we move things in a longer term intended direction in other cases where we need to touch code anyway. While I'm not going to refuse to ack this change just because of this, I don't feel like you've answered the original question. In particular I don't see how taking the value from a memory location we've already cached it in is changing any semantics here. While some masking may apply even to the raw policy (to zap unknown bits), this should be meaningless here. No bit used here should be unmentioned in the policy. >>> Also because the raw and host policies don't get sorted until much later >>> in boot. >> identify_cpu(), which invokes init_host_cpu_policies(), is called >> ahead of init_speculation_mitigations(), isn't it? > What is init_host_cpu_policies() ? Oops. I did use my own tree as reference during review. See the long pending "x86/xstate: drop xstate_offsets[] and xstate_sizes[]" [1]. 
Maybe you simply didn't tell me yet ... > I have a plan for what it's going to be if prior MSR work hadn't ground > to a halt, but it's a bit too late for that now. > > (To answer the question properly, no the policies aren't set up until > just before building dom0, and that's not something that is trivial to > change.) ... that what I'm doing there is too simplistic? Jan [1] https://lists.xen.org/archives/html/xen-devel/2021-04/msg01335.html
On 22/05/2023 8:10 am, Jan Beulich wrote: > On 19.05.2023 16:38, Andrew Cooper wrote: >> On 19/05/2023 7:00 am, Jan Beulich wrote: >>> On 17.05.2023 18:35, Andrew Cooper wrote: >>>> On 17/05/2023 3:47 pm, Jan Beulich wrote: >>>>> On 16.05.2023 16:53, Andrew Cooper wrote: >>>>>> @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) >>>>>> cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2); >>>>>> if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 ) >>>>>> cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp); >>>>>> + if ( cpu_has_arch_caps ) >>>>>> + rdmsrl(MSR_ARCH_CAPABILITIES, caps); >>>>> Why do you read the MSR again? I would have expected this to come out >>>>> of raw_cpu_policy now (and incrementally the CPUID pieces as well, >>>>> later on). >>>> Consistency with the surrounding logic. >>> I view this as relevant only when the code invoking CPUID directly is >>> intended to stay. >> Quite the contrary. It stays because this patch, and changing the >> semantics of the print block are unrelated things and should not be >> mixed together. > Hmm. On one hand I can see your point, yet otoh we move things in a longer > term intended direction in other cases where we need to touch code anyway. > While I'm not going to refuse to ack this change just because of this, I > don't fell like you've answered the original question. In particular I > don't see how taking the value from a memory location we've already cached > it in is changing any semantics here. While some masking may apply even to > the raw policy (to zap unknown bits), this should be meaningless here. No > bit used here should be unmentioned in the policy. The very next thing I'm going to need to do is start synthesizing arch caps bits for the hardware with known properties but without appropriate enumerations. This is necessary to make migration work. 
Because we have not taken a decision about what the printed block means, it needs to not change when I start using setup_force_cpu_cap(). >>>> Also because the raw and host policies don't get sorted until much later >>>> in boot. >>> identify_cpu(), which invokes init_host_cpu_policies(), is called >>> ahead of init_speculation_mitigations(), isn't it? >> What is init_host_cpu_policies() ? > Oops. I did use my own tree as reference during review. See the long > pending "x86/xstate: drop xstate_offsets[] and xstate_sizes[]" [1]. Maybe > you simply didn't tell me yet ... > >> I have a plan for what it's going to be if prior MSR work hadn't ground >> to a halt, but it's a bit too late for that now. >> >> (To answer the question properly, no the policies aren't set up until >> just before building dom0, and that's not something that is trivial to >> change.) > ... that what I'm doing there is too simplistic? Raw is fine. I found complexities with Host when doing that. ~Andrew
On 22.05.2023 16:14, Andrew Cooper wrote: > On 22/05/2023 8:10 am, Jan Beulich wrote: >> On 19.05.2023 16:38, Andrew Cooper wrote: >>> On 19/05/2023 7:00 am, Jan Beulich wrote: >>>> On 17.05.2023 18:35, Andrew Cooper wrote: >>>>> On 17/05/2023 3:47 pm, Jan Beulich wrote: >>>>>> On 16.05.2023 16:53, Andrew Cooper wrote: >>>>>>> @@ -401,6 +400,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) >>>>>>> cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2); >>>>>>> if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 ) >>>>>>> cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp); >>>>>>> + if ( cpu_has_arch_caps ) >>>>>>> + rdmsrl(MSR_ARCH_CAPABILITIES, caps); >>>>>> Why do you read the MSR again? I would have expected this to come out >>>>>> of raw_cpu_policy now (and incrementally the CPUID pieces as well, >>>>>> later on). >>>>> Consistency with the surrounding logic. >>>> I view this as relevant only when the code invoking CPUID directly is >>>> intended to stay. >>> Quite the contrary. It stays because this patch, and changing the >>> semantics of the print block are unrelated things and should not be >>> mixed together. >> Hmm. On one hand I can see your point, yet otoh we move things in a longer >> term intended direction in other cases where we need to touch code anyway. >> While I'm not going to refuse to ack this change just because of this, I >> don't fell like you've answered the original question. In particular I >> don't see how taking the value from a memory location we've already cached >> it in is changing any semantics here. While some masking may apply even to >> the raw policy (to zap unknown bits), this should be meaningless here. No >> bit used here should be unmentioned in the policy. > > The very next thing I'm going to need to do is start synthesizing arch > caps bits for the hardware with known properties but without appropriate > enumerations. This is necessary to make migration work. But you wouldn't alter the raw featureset, would you? 
As much as ... > Because we have not taken a decision about the what printed block means, > it needs to not change when I start using setup_force_cpu_cap(). ... setup_force_cpu_cap() doesn't affect raw. Jan
© 2016 - 2026 Red Hat, Inc.