Only explicit writes are subject to e.g. the checking of the MSR intercept
bitmap, which extends to VM-event's hvm_monitor_msr(). Indicate, by way of
a new boolean parameter, to the hook functions which variant it is.

Fixes: 6eb43fcf8a0b ("x86emul: support SWAPGS")
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
Later, in particular for nested, ->read_msr() may need to gain a similar
parameter.

Prior to nested making use of it, the new parameter is effectively dead
code with VM_EVENT=n. If we accepted Misra rule 2.2, something would
likely need doing about this.

I've suitably re-based "x86emul: misc additions" on top of this, but I
don't think I'll re-post it just for that.
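
Illustrative sketch only (not part of the patch; the helper names
monitor_explicit_write() and do_plain_write() are placeholders, not
existing Xen functions): a hook gaining the new parameter might consume
it roughly like this:

static int example_write_msr(
    unsigned int reg,
    uint64_t val,
    struct x86_emulate_ctxt *ctxt,
    bool explicit)
{
    /*
     * Only explicit (WRMSR-like) writes are subject to intercept-bitmap /
     * monitoring style checks; implicit ones (e.g. SWAPGS updating
     * MSR_SHADOW_GS_BASE) bypass them.
     */
    if ( explicit && monitor_explicit_write(reg, val) )
        return X86EMUL_OKAY;

    return do_plain_write(reg, val);
}
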
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -569,7 +569,8 @@ static int fuzz_read_msr(
static int fuzz_write_msr(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
struct fuzz_state *s = ctxt->data;
struct fuzz_corpus *c = s->corpus;
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1705,7 +1705,8 @@ static int cf_check hvmemul_write_io_dis
static int cf_check hvmemul_write_msr_discard(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
return X86EMUL_OKAY;
}
@@ -2427,9 +2428,10 @@ static int cf_check hvmemul_read_msr(
static int cf_check hvmemul_write_msr(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
- int rc = hvm_msr_write_intercept(reg, val, true);
+ int rc = hvm_msr_write_intercept(reg, val, explicit);

    if ( rc == X86EMUL_EXCEPTION )
x86_emul_hw_exception(X86_EXC_GP, 0, ctxt);
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -1038,7 +1038,8 @@ static int cf_check read_msr(
}
static int cf_check write_msr(
- unsigned int reg, uint64_t val, struct x86_emulate_ctxt *ctxt)
+ unsigned int reg, uint64_t val, struct x86_emulate_ctxt *ctxt,
+ bool explicit)
{
struct vcpu *curr = current;
const struct domain *currd = curr->domain;
--- a/xen/arch/x86/x86_emulate/0f01.c
+++ b/xen/arch/x86/x86_emulate/0f01.c
@@ -40,7 +40,7 @@ int x86emul_0f01(struct x86_emulate_stat
fail_if(!ops->write_msr);
rc = ops->write_msr(regs->ecx,
((uint64_t)regs->r(dx) << 32) | regs->eax,
- ctxt);
+ ctxt, true);
goto done;
}
generate_exception(X86_EXC_UD);
@@ -194,7 +194,7 @@ int x86emul_0f01(struct x86_emulate_stat
(rc = ops->read_msr(MSR_SHADOW_GS_BASE, &msr_val,
ctxt)) != X86EMUL_OKAY ||
(rc = ops->write_msr(MSR_SHADOW_GS_BASE, sreg.base,
- ctxt)) != X86EMUL_OKAY )
+ ctxt, false)) != X86EMUL_OKAY )
goto done;
sreg.base = msr_val;
if ( (rc = ops->write_segment(x86_seg_gs, &sreg,
@@ -202,7 +202,7 @@ int x86emul_0f01(struct x86_emulate_stat
{
/* Best effort unwind (i.e. no real error checking). */
if ( ops->write_msr(MSR_SHADOW_GS_BASE, msr_val,
- ctxt) == X86EMUL_EXCEPTION )
+ ctxt, false) == X86EMUL_EXCEPTION )
x86_emul_reset_event(ctxt);
goto done;
}
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -3814,7 +3814,7 @@ x86_emulate(
fail_if(ops->write_msr == NULL);
if ( (rc = ops->write_msr(_regs.ecx,
((uint64_t)_regs.r(dx) << 32) | _regs.eax,
- ctxt)) != 0 )
+ ctxt, true)) != 0 )
goto done;
break;
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -470,13 +470,15 @@ struct x86_emulate_ops
struct x86_emulate_ctxt *ctxt);
/*
- * write_dr: Write to model-specific register.
- * @reg: [IN ] Register to write.
+ * write_msr: Write to model-specific register.
+ * @reg: [IN ] Register to write.
+ * @explicit: [IN ] Whether this is an explicit WRMSR or alike.
*/
int (*write_msr)(
unsigned int reg,
uint64_t val,
- struct x86_emulate_ctxt *ctxt);
+ struct x86_emulate_ctxt *ctxt,
+ bool explicit);
/*
* cache_op: Write-back and/or invalidate cache contents.

On Tue, Jan 27, 2026 at 11:21:06AM +0100, Jan Beulich wrote:
> Only explicit writes are subject to e.g. the checking of the MSR intercept
> bitmap, which extends to VM-event's hvm_monitor_msr(). Indicate, by way of
> a new boolean parameter, to the hook functions which variant it is.
>
> Fixes: 6eb43fcf8a0b ("x86emul: support SWAPGS")
> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
> ---
> Later, in particular for nested, ->read_msr() may need to gain a similar
> parameter.
>
> Prior to nested making use of it, the new parameter is effectively dead
> code with VM_EVENT=n. If we accepted Misra rule 2.2, something would
> likely need doing about this.
Hm, yes, it propagates into hvm_msr_write_intercept(), which then turns
into `if ( may_defer && false )` in the VM_EVENT=n case. But then you
could say the same about the code inside the if block above in the
VM_EVENT=n case: it's also effectively unreachable code.
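
For reference, a condensed sketch of the shape in question (simplified
from hvm_msr_write_intercept(), not the verbatim code):

int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
                            bool may_defer)
{
    uint64_t old_content = 0;   /* the real code fetches the old value */

    /*
     * With VM_EVENT=n the monitoring check folds to a constant false, so
     * this reduces to "if ( may_defer && false )": the block is never
     * entered, while may_defer keeps being evaluated without affecting
     * behavior.
     */
    if ( may_defer && monitored_msr(current->domain, msr) )
    {
        /* Defer: notify the VM-event monitor instead of writing now. */
        hvm_monitor_msr(msr, msr_content, old_content);
        return X86EMUL_OKAY;
    }

    /* ... regular write handling ... */
    return X86EMUL_OKAY;
}
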
> I've suitably re-based "x86emul: misc additions" on top of this, but I
> don't think I'll re-post it just for that.
>
> --- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
> +++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
> @@ -569,7 +569,8 @@ static int fuzz_read_msr(
> static int fuzz_write_msr(
> unsigned int reg,
> uint64_t val,
> - struct x86_emulate_ctxt *ctxt)
> + struct x86_emulate_ctxt *ctxt,
> + bool explicit)
> {
> struct fuzz_state *s = ctxt->data;
> struct fuzz_corpus *c = s->corpus;
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1705,7 +1705,8 @@ static int cf_check hvmemul_write_io_dis
> static int cf_check hvmemul_write_msr_discard(
> unsigned int reg,
> uint64_t val,
> - struct x86_emulate_ctxt *ctxt)
> + struct x86_emulate_ctxt *ctxt,
> + bool explicit)
> {
> return X86EMUL_OKAY;
> }
> @@ -2427,9 +2428,10 @@ static int cf_check hvmemul_read_msr(
> static int cf_check hvmemul_write_msr(
> unsigned int reg,
> uint64_t val,
> - struct x86_emulate_ctxt *ctxt)
> + struct x86_emulate_ctxt *ctxt,
> + bool explicit)
> {
> - int rc = hvm_msr_write_intercept(reg, val, true);
> + int rc = hvm_msr_write_intercept(reg, val, explicit);
I've wondered whether we also want to rename the parameter of
hvm_msr_write_intercept() into something different than may_defer. It
feels weird to translate a parameter that denotes an explicit MSR
access into one that signals whether it's fine to defer the operation
or not.
Thanks, Roger.

On 30.01.2026 11:00, Roger Pau Monné wrote:
> On Tue, Jan 27, 2026 at 11:21:06AM +0100, Jan Beulich wrote:
>> Only explicit writes are subject to e.g. the checking of the MSR intercept
>> bitmap, which extends to VM-event's hvm_monitor_msr(). Indicate, by way of
>> a new boolean parameter, to the hook functions which variant it is.
>>
>> Fixes: 6eb43fcf8a0b ("x86emul: support SWAPGS")
>> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Thanks.
>> ---
>> Later, in particular for nested, ->read_msr() may need to gain a similar
>> parameter.
>>
>> Prior to nested making use of it, the new parameter is effectively dead
>> code with VM_EVENT=n. If we accepted Misra rule 2.2, something would
>> likely need doing about this.
>
> Hm, yes, it propagates into hvm_msr_write_intercept(), which then turns
> into `if ( may_defer && false )` in the VM_EVENT=n case. But then you
> could say the same about the code inside the if block above in the
> VM_EVENT=n case: it's also effectively unreachable code.
Unreachable and dead code are different things to Misra, though. It is my
understanding that we deliberately permit constructs reducing to "if (0)"
in certain configurations, relying on DCE: There's then no generated code
for the construct, and hence nothing that cannot be reached. The
situation is different for a parameter that has no use: Its removal (in
the specific configuration) wouldn't alter the behavior. Hence the
parameter and all arguments passed for it are "dead".
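
A minimal, generic-C illustration of that distinction (not Xen code):

#include <stdbool.h>

#define FEATURE 0   /* stand-in for a disabled configuration option */

/*
 * "Unreachable" in the permitted sense: the block reduces to "if ( 0 )",
 * DCE removes it, and no generated code remains that could fail to be
 * reached.
 */
void f(void)
{
    if ( FEATURE )
    {
        /* nothing of this survives compilation */
    }
}

/*
 * "Dead": 'flag' is still evaluated at every call site, yet in this
 * configuration nothing depends on it - removing the parameter and all
 * arguments passed for it would leave behavior unchanged.
 */
void g(bool flag)
{
    if ( flag && FEATURE )
    {
        /* never entered; 'flag' has no effect on behavior */
    }
}
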
>> @@ -2427,9 +2428,10 @@ static int cf_check hvmemul_read_msr(
>> static int cf_check hvmemul_write_msr(
>> unsigned int reg,
>> uint64_t val,
>> - struct x86_emulate_ctxt *ctxt)
>> + struct x86_emulate_ctxt *ctxt,
>> + bool explicit)
>> {
>> - int rc = hvm_msr_write_intercept(reg, val, true);
>> + int rc = hvm_msr_write_intercept(reg, val, explicit);
>
> I've wondered whether we also want to rename the parameter of
> hvm_msr_write_intercept() into something different than may_defer. It
> feels weird to translate a parameter that denotes an explicit MSR
> access into one that signals whether it's fine to defer the operation
> or not.
I did think the same, just that - considering all use sites - I couldn't
even come close to thinking of some sensible new name.
Jan

On Fri, Jan 30, 2026 at 03:01:28PM +0100, Jan Beulich wrote:
> On 30.01.2026 11:00, Roger Pau Monné wrote:
> > On Tue, Jan 27, 2026 at 11:21:06AM +0100, Jan Beulich wrote:
> >> Only explicit writes are subject to e.g. the checking of the MSR intercept
> >> bitmap, which extends to VM-event's hvm_monitor_msr(). Indicate, by way of
> >> a new boolean parameter, to the hook functions which variant it is.
> >>
> >> Fixes: 6eb43fcf8a0b ("x86emul: support SWAPGS")
> >> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >
> > Acked-by: Roger Pau Monné <roger.pau@citrix.com>
>
> Thanks.
>
> >> ---
> >> Later, in particular for nested, ->read_msr() may need to gain a similar
> >> parameter.
> >>
> >> Prior to nested making use of it, the new parameter is effectively dead
> >> code with VM_EVENT=n. If we accepted Misra rule 2.2, something would
> >> likely need doing about this.
> >
> > Hm, yes, it propagates into hvm_msr_write_intercept(), which then turns
> > into `if ( may_defer && false )` in the VM_EVENT=n case. But then you
> > could say the same about the code inside the if block above in the
> > VM_EVENT=n case: it's also effectively unreachable code.
>
> Unreachable and dead code are different things to Misra, though. It is my
> understanding that we deliberately permit constructs reducing to "if (0)"
> in certain configurations, relying on DCE: There's then no generated code
> for the construct, and hence nothing that cannot be reached. The
> situation is different for a parameter that has no use: Its removal (in
> the specific configuration) wouldn't alter the behavior. Hence the
> parameter and all arguments passed for it are "dead".
Yeah, I think dead is a good definition: the variable is possibly
evaluated, but its value doesn't change the flow of execution in the
VM_EVENT=n case.
> >> @@ -2427,9 +2428,10 @@ static int cf_check hvmemul_read_msr(
> >> static int cf_check hvmemul_write_msr(
> >> unsigned int reg,
> >> uint64_t val,
> >> - struct x86_emulate_ctxt *ctxt)
> >> + struct x86_emulate_ctxt *ctxt,
> >> + bool explicit)
> >> {
> >> - int rc = hvm_msr_write_intercept(reg, val, true);
> >> + int rc = hvm_msr_write_intercept(reg, val, explicit);
> >
> > I've wondered whether we also want to rename the parameter of
> > hvm_msr_write_intercept() into something different than may_defer. It
> > feels weird to translate a parameter that denotes an explicit MSR
> > access into one that signals whether it's fine to defer the operation
> > or not.
>
> I did think the same, just that - considering all use sites - I couldn't
> even come close to thinking of some sensible new name.
Since may_defer is tied to the monitor logic it might be helpful to
give it a name that conveys that, but I'm having a hard time coming up
with one that's not too verbose. Let's leave it as-is for now then.
Thanks, Roger.

On 2026-01-27 05:21, Jan Beulich wrote:
> Only explicit writes are subject to e.g. the checking of the MSR intercept
> bitmap, which extends to VM-event's hvm_monitor_msr(). Indicate, by way of
> a new boolean parameter, to the hook functions which variant it is.
>
> Fixes: 6eb43fcf8a0b ("x86emul: support SWAPGS")
> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>