[PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT

Posted by Jan Beulich 3 weeks, 1 day ago, as part of a 3-patch series (a newer version of this series exists)
Rather than unconditionally accepting reads and writes while discarding
the value written, make accesses properly conditional upon CMCI being
exposed via MCG_CAP, and arrange to actually retain the value written.
Also reflect the extra LVT in LVR.

Note that this doesn't change the status quo of us never delivering any
interrupt through this LVT.

Fixes: 70173dbb9948 ("x86/HVM: fix miscellaneous aspects of x2APIC emulation")
Fixes: 8d0a20587e4e ("x86/hvm: further restrict access to x2apic MSRs")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
The Fixes: tags reference the commits where the explicit mentioning of APIC_CMCI
in what are now guest_{rd,wr}msr_x2apic() was introduced; the mis-handling
really pre-dates those, though.

In principle the later assignment to "nr" in vlapic_do_init() could now be
dropped again. I wasn't quite sure though whether that's a good idea.
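
For illustration only (my arithmetic, not part of the change itself), the
resulting LVR values work out as:

  nr = 6 (MCG_CMCI_P clear) -> MAXLVT = 5 -> LVR = 0x00050014 (as before)
  nr = 7 (MCG_CMCI_P set)   -> MAXLVT = 6 -> LVR = 0x00060014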

--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -31,10 +31,13 @@
 #include <public/hvm/ioreq.h>
 #include <public/hvm/params.h>
 
-#define LVT_BIAS(reg)                   (((reg) - APIC_LVTT) >> 4)
+#include <../cpu/mcheck/x86_mca.h> /* MCG_CMCI_P */
+
+#define LVT_BIAS(reg)                   (((reg) - APIC_CMCI) >> 4)
 
 #define LVTS \
-    LVT(LVTT), LVT(LVTTHMR), LVT(LVTPC), LVT(LVT0), LVT(LVT1), LVT(LVTERR),
+    LVT(LVTT), LVT(LVTTHMR), LVT(LVTPC), LVT(LVT0), LVT(LVT1), LVT(LVTERR), \
+    LVT(CMCI),
 
 static const unsigned int lvt_reg[] = {
 #define LVT(which) APIC_ ## which
@@ -57,6 +60,7 @@ static const unsigned int lvt_valid[] =
 #define LVT0_VALID    LINT_MASK
 #define LVT1_VALID    LINT_MASK
 #define LVTERR_VALID  LVT_MASK
+#define CMCI_VALID    (LVT_MASK | APIC_DM_MASK)
 #define LVT(which)    [LVT_BIAS(APIC_ ## which)] = which ## _VALID
     LVTS
 #undef LVT
@@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
         return X86EMUL_EXCEPTION;
 
     offset = reg << 4;
-    if ( offset == APIC_ICR )
+    switch ( offset )
+    {
+    case APIC_ICR:
         high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
+        break;
+
+    case APIC_CMCI:
+        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
+            return X86EMUL_EXCEPTION;
+        break;
+    }
 
     *val = high | vlapic_read_aligned(vlapic, offset);
 
@@ -868,6 +881,10 @@ void vlapic_reg_write(struct vcpu *v, un
         vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000U);
         break;
 
+    case APIC_CMCI:         /* LVT CMCI */
+        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
+            break;
+        fallthrough;
     case APIC_LVTT:         /* LVT Timer Reg */
         if ( vlapic_lvtt_tdt(vlapic) !=
              ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE) )
@@ -1024,9 +1041,12 @@ int guest_wrmsr_x2apic(struct vcpu *v, u
             return X86EMUL_EXCEPTION;
         break;
 
+    case APIC_CMCI:
+        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
+            return X86EMUL_EXCEPTION;
+        fallthrough;
     case APIC_LVTTHMR:
     case APIC_LVTPC:
-    case APIC_CMCI:
         if ( val & ~(LVT_MASK | APIC_DM_MASK) )
             return X86EMUL_EXCEPTION;
         break;
@@ -1438,7 +1458,9 @@ static void vlapic_do_init(struct vlapic
     if ( !has_vlapic(vlapic_vcpu(vlapic)->domain) )
         return;
 
-    vlapic_set_reg(vlapic, APIC_LVR, 0x00050014);
+    nr = 6 + !!(vlapic_vcpu(vlapic)->arch.vmce.mcg_cap & MCG_CMCI_P);
+    vlapic_set_reg(vlapic, APIC_LVR,
+                   0x00000014 | MASK_INSR(nr - 1, APIC_LVR_MAXLVT_MASK));
 
     for ( i = 0; i < 8; i++ )
     {
--- a/xen/arch/x86/include/asm/apicdef.h
+++ b/xen/arch/x86/include/asm/apicdef.h
@@ -15,7 +15,10 @@
 #define			GET_xAPIC_ID(x)		(((x)>>24)&0xFFu)
 #define			SET_xAPIC_ID(x)		(((x)<<24))
 #define		APIC_LVR	0x30
-#define			APIC_LVR_MASK		0xFF00FF
+#define			APIC_LVR_VERSION_MASK	0xff
+#define			APIC_LVR_MAXLVT_MASK	0xff0000
+#define			APIC_LVR_MASK		(APIC_LVR_VERSION_MASK | \
+						 APIC_LVR_MAXLVT_MASK)
 #define			APIC_LVR_DIRECTED_EOI	(1 << 24)
 #define			GET_APIC_VERSION(x)	((x)&0xFF)
 #define			GET_APIC_MAXLVT(x)	(((x)>>16)&0xFF)
Re: [PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT
Posted by Andrew Cooper 2 weeks, 1 day ago
On 08/10/2025 1:09 pm, Jan Beulich wrote:
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
>          return X86EMUL_EXCEPTION;
>  
>      offset = reg << 4;
> -    if ( offset == APIC_ICR )
> +    switch ( offset )
> +    {
> +    case APIC_ICR:
>          high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
> +        break;
> +
> +    case APIC_CMCI:
> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
> +            return X86EMUL_EXCEPTION;
> +        break;
> +    }
>  
>      *val = high | vlapic_read_aligned(vlapic, offset);
>  
> @@ -868,6 +881,10 @@ void vlapic_reg_write(struct vcpu *v, un
>          vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000U);
>          break;
>  
> +    case APIC_CMCI:         /* LVT CMCI */
> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
> +            break;
> +        fallthrough;
>      case APIC_LVTT:         /* LVT Timer Reg */
>          if ( vlapic_lvtt_tdt(vlapic) !=
>               ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE) )
> @@ -1024,9 +1041,12 @@ int guest_wrmsr_x2apic(struct vcpu *v, u
>              return X86EMUL_EXCEPTION;
>          break;
>  
> +    case APIC_CMCI:
> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
> +            return X86EMUL_EXCEPTION;
> +        fallthrough;
>      case APIC_LVTTHMR:
>      case APIC_LVTPC:
> -    case APIC_CMCI:
>          if ( val & ~(LVT_MASK | APIC_DM_MASK) )
>              return X86EMUL_EXCEPTION;
>          break;

This is almost certainly not how real hardware behaves.

The APIC is a discrete block of logic, whether it's integrated into the
core or not.  A new LVT is "just" another interrupt source, and if
nothing is wired into that pin, then it's just a register which never
produces an interrupt.

Accessibility of LVT_CMCI will depend on MAXLVT and nothing else.  In
silicon, I'm pretty sure it will be hardcoded as fully absent or
present, because I can't see any reason to make this configurable.

At this point, things get more complicated.

On Intel, there's no such thing as being x2APIC-capable (irrespective of
x2APIC being enabled) without LVT_CMCI, which is why there are no additional
access constraints on the register.

On AMD, there's no LVT_CMCI even on systems which support x2APIC.
Instead, ELVTs are used, and the MCE configuration determines which ELVT the
interrupt is delivered through.

Choosing a default MAXLVT based on MCG_CMCI_P is probably fine (although
it certainly is ugly to tie APIC and vMCE together), but controls on the
access to APIC_CMCI should be based on MAXLVT.
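
Something along these lines perhaps (rough, untested sketch only;
vlapic_has_cmci_lvt() is a made-up helper name, and it assumes the existing
vlapic_get_reg() accessor):

    static bool vlapic_has_cmci_lvt(const struct vlapic *vlapic)
    {
        /* With the CMCI LVT present there are 7 entries, i.e. MAXLVT is 6. */
        return GET_APIC_MAXLVT(vlapic_get_reg(vlapic, APIC_LVR)) >= 6;
    }

with the rd/wr paths then checking this instead of v->arch.vmce.mcg_cap
directly.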

~Andrew

Re: [PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT
Posted by Jan Beulich 2 weeks, 1 day ago
On 14.10.2025 21:36, Andrew Cooper wrote:
> On 08/10/2025 1:09 pm, Jan Beulich wrote:
>> --- a/xen/arch/x86/hvm/vlapic.c
>> +++ b/xen/arch/x86/hvm/vlapic.c
>> @@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
>>          return X86EMUL_EXCEPTION;
>>  
>>      offset = reg << 4;
>> -    if ( offset == APIC_ICR )
>> +    switch ( offset )
>> +    {
>> +    case APIC_ICR:
>>          high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
>> +        break;
>> +
>> +    case APIC_CMCI:
>> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
>> +            return X86EMUL_EXCEPTION;
>> +        break;
>> +    }
>>  
>>      *val = high | vlapic_read_aligned(vlapic, offset);
>>  
>> @@ -868,6 +881,10 @@ void vlapic_reg_write(struct vcpu *v, un
>>          vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000U);
>>          break;
>>  
>> +    case APIC_CMCI:         /* LVT CMCI */
>> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
>> +            break;
>> +        fallthrough;
>>      case APIC_LVTT:         /* LVT Timer Reg */
>>          if ( vlapic_lvtt_tdt(vlapic) !=
>>               ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE) )
>> @@ -1024,9 +1041,12 @@ int guest_wrmsr_x2apic(struct vcpu *v, u
>>              return X86EMUL_EXCEPTION;
>>          break;
>>  
>> +    case APIC_CMCI:
>> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
>> +            return X86EMUL_EXCEPTION;
>> +        fallthrough;
>>      case APIC_LVTTHMR:
>>      case APIC_LVTPC:
>> -    case APIC_CMCI:
>>          if ( val & ~(LVT_MASK | APIC_DM_MASK) )
>>              return X86EMUL_EXCEPTION;
>>          break;
> 
> This is almost certainly not how real hardware behaves.
> 
> The APIC is a discrete block of logic, whether it's integrated into the
> core or not.  A new LVT is "just" another interrupt source, and if
> nothing is wired into that pin, then it's just a register which never
> produces an interrupt.
> 
> Accessibility of LVT_CMCI will depend on MAXLVT and nothing else.  In
> silicon, I'm pretty sure it will be hardcoded as fully absent or
> present, because I can't see any reason to make this configurable.
> 
> At this point, things get more complicated.
> 
> On Intel, there's no such thing as x2APIC capable (irrespective of
> x2APIC enabled) without LVT_CMCI which is why there are no additional
> access constraints on the register.
> 
> On AMD, there's no LVT_CMCI even on systems which support x2APIC. 
> Instead, ELVTs are used and it is MCE-configuration based which ELVT the
> interrupt is delivered through.
> 
> Choosing a default MAXLVT based on MCG_CMCI_P is probably fine (although
> it certainly is ugly to tie APIC and vMCE together), but controls on the
> access to APIC_CMCI should be based on MAXLVT.

As you ask for it, I can certainly do so. I didn't, because the way it's
done now the checks are cheaper overall.

Jan

Re: [PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT
Posted by Grygorii Strashko 3 weeks ago
Hi Jan

On 08.10.25 15:09, Jan Beulich wrote:
> Rather than unconditionally accepting reads and writes while discarding
> the value written, make accesses properly conditional upon CMCI being
> exposed via MCG_CAP, and arrange to actually retain the value written.
> Also reflect the extra LVT in LVR.
> 
> Note that this doesn't change the status quo of us never delivering any
> interrupt through this LVT.
> 
> Fixes: 70173dbb9948 ("x86/HVM: fix miscellaneous aspects of x2APIC emulation")
> Fixes: 8d0a20587e4e ("x86/hvm: further restrict access to x2apic MSRs")
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> The Fixes: tags are referencing where the explicit mentioning of APIC_CMCI
> in what are now guest_{rd,wr}msr_x2apic() was introduced; the mis-handling
> really pre-dates that, though.
> 
> In principle the later assignment to "nr" in vlapic_do_init() could now be
> dropped again. I wasn't quite sure though whether that's a good idea.
> 
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -31,10 +31,13 @@
>   #include <public/hvm/ioreq.h>
>   #include <public/hvm/params.h>
>   
> -#define LVT_BIAS(reg)                   (((reg) - APIC_LVTT) >> 4)
> +#include <../cpu/mcheck/x86_mca.h> /* MCG_CMCI_P */

This include... you probably do not like it either.
It is a dependency outside the HVM code.

I've been thinking about something like vlapic->caps, which could be filled in before vlapic_init()
or passed as a parameter, but it seems the x86 toolstack is considered to be able to overwrite anything,
including v->arch.vmce.

Seems there are no better options here.

> +
> +#define LVT_BIAS(reg)                   (((reg) - APIC_CMCI) >> 4)
>   
>   #define LVTS \
> -    LVT(LVTT), LVT(LVTTHMR), LVT(LVTPC), LVT(LVT0), LVT(LVT1), LVT(LVTERR),
> +    LVT(LVTT), LVT(LVTTHMR), LVT(LVTPC), LVT(LVT0), LVT(LVT1), LVT(LVTERR), \
> +    LVT(CMCI),
>   
>   static const unsigned int lvt_reg[] = {
>   #define LVT(which) APIC_ ## which
> @@ -57,6 +60,7 @@ static const unsigned int lvt_valid[] =
>   #define LVT0_VALID    LINT_MASK
>   #define LVT1_VALID    LINT_MASK
>   #define LVTERR_VALID  LVT_MASK
> +#define CMCI_VALID    (LVT_MASK | APIC_DM_MASK)
>   #define LVT(which)    [LVT_BIAS(APIC_ ## which)] = which ## _VALID
>       LVTS
>   #undef LVT
> @@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
>           return X86EMUL_EXCEPTION;
>   
>       offset = reg << 4;
> -    if ( offset == APIC_ICR )
> +    switch ( offset )
> +    {
> +    case APIC_ICR:
>           high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
> +        break;
> +
> +    case APIC_CMCI:
> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )

Could it be done using a wrapper, like vmce_has_cmci()?
As this is Intel-specific, it's a candidate to be made opt-out eventually.

> +            return X86EMUL_EXCEPTION;
> +        break;
> +    }
>   
>       *val = high | vlapic_read_aligned(vlapic, offset);
>   
> @@ -868,6 +881,10 @@ void vlapic_reg_write(struct vcpu *v, un
>           vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000U);
>           break;
>   
> +    case APIC_CMCI:         /* LVT CMCI */
> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
> +            break;
> +        fallthrough;
>       case APIC_LVTT:         /* LVT Timer Reg */
>           if ( vlapic_lvtt_tdt(vlapic) !=
>                ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE) )
> @@ -1024,9 +1041,12 @@ int guest_wrmsr_x2apic(struct vcpu *v, u
>               return X86EMUL_EXCEPTION;
>           break;
>   
> +    case APIC_CMCI:
> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
> +            return X86EMUL_EXCEPTION;
> +        fallthrough;
>       case APIC_LVTTHMR:
>       case APIC_LVTPC:
> -    case APIC_CMCI:
>           if ( val & ~(LVT_MASK | APIC_DM_MASK) )
>               return X86EMUL_EXCEPTION;
>           break;
> @@ -1438,7 +1458,9 @@ static void vlapic_do_init(struct vlapic
>       if ( !has_vlapic(vlapic_vcpu(vlapic)->domain) )
>           return;
>   
> -    vlapic_set_reg(vlapic, APIC_LVR, 0x00050014);
> +    nr = 6 + !!(vlapic_vcpu(vlapic)->arch.vmce.mcg_cap & MCG_CMCI_P);
> +    vlapic_set_reg(vlapic, APIC_LVR,
> +                   0x00000014 | MASK_INSR(nr - 1, APIC_LVR_MAXLVT_MASK));
>   
>       for ( i = 0; i < 8; i++ )
>       {
> --- a/xen/arch/x86/include/asm/apicdef.h
> +++ b/xen/arch/x86/include/asm/apicdef.h
> @@ -15,7 +15,10 @@
>   #define			GET_xAPIC_ID(x)		(((x)>>24)&0xFFu)
>   #define			SET_xAPIC_ID(x)		(((x)<<24))
>   #define		APIC_LVR	0x30
> -#define			APIC_LVR_MASK		0xFF00FF
> +#define			APIC_LVR_VERSION_MASK	0xff
> +#define			APIC_LVR_MAXLVT_MASK	0xff0000
> +#define			APIC_LVR_MASK		(APIC_LVR_VERSION_MASK | \
> +						 APIC_LVR_MAXLVT_MASK)
>   #define			APIC_LVR_DIRECTED_EOI	(1 << 24)
>   #define			GET_APIC_VERSION(x)	((x)&0xFF)
>   #define			GET_APIC_MAXLVT(x)	(((x)>>16)&0xFF)
> 

-- 
Best regards,
-grygorii
Re: [PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT
Posted by Jan Beulich 3 weeks ago
On 09.10.2025 17:08, Grygorii Strashko wrote:
> On 08.10.25 15:09, Jan Beulich wrote:
>> --- a/xen/arch/x86/hvm/vlapic.c
>> +++ b/xen/arch/x86/hvm/vlapic.c
>> @@ -31,10 +31,13 @@
>>   #include <public/hvm/ioreq.h>
>>   #include <public/hvm/params.h>
>>   
>> -#define LVT_BIAS(reg)                   (((reg) - APIC_LVTT) >> 4)
>> +#include <../cpu/mcheck/x86_mca.h> /* MCG_CMCI_P */
> 
> This include... You probably do not like it also
> It is dependency outside HVM code.
> 
> I've been thinking about something like vlapic->caps which can be filed before vlapic_init()
> or passed as parameter, but seems x86 toolstack is considered to be able overwrite anything,
> including v->arch.vmce.
> 
> Seems, no better options here.

Same here, hence why I used it despite not liking it.

>> @@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
>>           return X86EMUL_EXCEPTION;
>>   
>>       offset = reg << 4;
>> -    if ( offset == APIC_ICR )
>> +    switch ( offset )
>> +    {
>> +    case APIC_ICR:
>>           high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
>> +        break;
>> +
>> +    case APIC_CMCI:
>> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
> 
> Could it be done using wrapper, like vmce_has_cmci()?
> As this is Intel specific it's candidate to be opt-out eventually.

Possible. I wanted to limit the churn, hence I preferred not to introduce
a wrapper. I wouldn't want such an abstraction to be a function taking a vCPU;
really this should be a domain property imo.

Jan
Re: [PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT
Posted by Grygorii Strashko 2 weeks, 2 days ago

On 09.10.25 18:37, Jan Beulich wrote:
> On 09.10.2025 17:08, Grygorii Strashko wrote:
>> On 08.10.25 15:09, Jan Beulich wrote:
>>> --- a/xen/arch/x86/hvm/vlapic.c
>>> +++ b/xen/arch/x86/hvm/vlapic.c
>>> @@ -31,10 +31,13 @@
>>>    #include <public/hvm/ioreq.h>
>>>    #include <public/hvm/params.h>
>>>    
>>> -#define LVT_BIAS(reg)                   (((reg) - APIC_LVTT) >> 4)
>>> +#include <../cpu/mcheck/x86_mca.h> /* MCG_CMCI_P */
>>
>> This include... You probably do not like it also
>> It is dependency outside HVM code.
>>
>> I've been thinking about something like vlapic->caps which can be filed before vlapic_init()
>> or passed as parameter, but seems x86 toolstack is considered to be able overwrite anything,
>> including v->arch.vmce.
>>
>> Seems, no better options here.
> 
> Same here, hence why I used it despite not liking it.
> 
>>> @@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
>>>            return X86EMUL_EXCEPTION;
>>>    
>>>        offset = reg << 4;
>>> -    if ( offset == APIC_ICR )
>>> +    switch ( offset )
>>> +    {
>>> +    case APIC_ICR:
>>>            high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
>>> +        break;
>>> +
>>> +    case APIC_CMCI:
>>> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
>>
>> Could it be done using wrapper, like vmce_has_cmci()?
>> As this is Intel specific it's candidate to be opt-out eventually.
> 
> Possible. I wanted to limit the churn, hence why I preferred not to introduce
> a wrapper. Such an abstraction I wouldn't like to be a function taking a vCPU;
> really this should be a domain property imo.

My intention was to limit the spread of direct access to "vmce" data across the vlapic code:

static bool vlapic_has_cmci(const struct vcpu *v)
{
	return v->arch.vmce.mcg_cap & MCG_CMCI_P;
}

Just expressing an opinion.

-- 
Best regards,
-grygorii
Re: [PATCH for-4.21??? 3/3] x86/vLAPIC: properly support the CMCI LVT
Posted by Jan Beulich 2 weeks, 2 days ago
On 14.10.2025 16:12, Grygorii Strashko wrote:
> 
> 
> On 09.10.25 18:37, Jan Beulich wrote:
>> On 09.10.2025 17:08, Grygorii Strashko wrote:
>>> On 08.10.25 15:09, Jan Beulich wrote:
>>>> --- a/xen/arch/x86/hvm/vlapic.c
>>>> +++ b/xen/arch/x86/hvm/vlapic.c
>>>> @@ -31,10 +31,13 @@
>>>>    #include <public/hvm/ioreq.h>
>>>>    #include <public/hvm/params.h>
>>>>    
>>>> -#define LVT_BIAS(reg)                   (((reg) - APIC_LVTT) >> 4)
>>>> +#include <../cpu/mcheck/x86_mca.h> /* MCG_CMCI_P */
>>>
>>> This include... You probably do not like it also
>>> It is dependency outside HVM code.
>>>
>>> I've been thinking about something like vlapic->caps which can be filed before vlapic_init()
>>> or passed as parameter, but seems x86 toolstack is considered to be able overwrite anything,
>>> including v->arch.vmce.
>>>
>>> Seems, no better options here.
>>
>> Same here, hence why I used it despite not liking it.
>>
>>>> @@ -697,8 +701,17 @@ int guest_rdmsr_x2apic(const struct vcpu
>>>>            return X86EMUL_EXCEPTION;
>>>>    
>>>>        offset = reg << 4;
>>>> -    if ( offset == APIC_ICR )
>>>> +    switch ( offset )
>>>> +    {
>>>> +    case APIC_ICR:
>>>>            high = (uint64_t)vlapic_read_aligned(vlapic, APIC_ICR2) << 32;
>>>> +        break;
>>>> +
>>>> +    case APIC_CMCI:
>>>> +        if ( !(v->arch.vmce.mcg_cap & MCG_CMCI_P) )
>>>
>>> Could it be done using wrapper, like vmce_has_cmci()?
>>> As this is Intel specific it's candidate to be opt-out eventually.
>>
>> Possible. I wanted to limit the churn, hence why I preferred not to introduce
>> a wrapper. Such an abstraction I wouldn't like to be a function taking a vCPU;
>> really this should be a domain property imo.
> 
> My intention was to limit spreading direct access to "vmce" data over vlapic code:
> 
> static bool vlapic_has_cmci(const struct vcpu *v)
> {
> 	return v->arch.vmce.mcg_cap & MCG_CMCI_P;
> }

"vlapic" in the name makes it seemingly better, but not really. As before: I
think such a predicate should take a const struct domain * as input.
Unless of course we expect that different vCPU-s in a guest may have
different settings of the MCG_CMCI_P bit.
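
Something like a hypothetical domain_has_cmci() perhaps (sketch only; there's
no separate domain-wide MCG_CAP field right now, hence the fallback to vCPU0's
view):

    static bool domain_has_cmci(const struct domain *d)
    {
        /* Sketch: use vCPU0's MCG_CAP in the absence of a domain-wide field. */
        return d->vcpu[0] && (d->vcpu[0]->arch.vmce.mcg_cap & MCG_CMCI_P);
    }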

Jan