FEP is now the only reason for the #UD handler to run.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
---
xen/arch/x86/hvm/hvm.c | 43 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 22 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c8cae41705d..4c00cf4c4fe 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3837,35 +3837,34 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
struct vcpu *cur = current;
bool should_emulate = false;
struct hvm_emulate_ctxt ctxt;
+ const struct segment_register *cs;
+ uint32_t walk;
+ unsigned long addr;
+ char sig[5]; /* ud2; .ascii "xen" */
hvm_emulate_init_once(&ctxt, NULL, regs);
- if ( opt_hvm_fep )
+ cs = &ctxt.seg_reg[x86_seg_cs];
+ walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+ ? PFEC_user_mode : 0) | PFEC_insn_fetch;
+
+ if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
+ sizeof(sig), hvm_access_insn_fetch,
+ cs, &addr) &&
+ (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+ walk, NULL) == HVMTRANS_okay) &&
+ (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
{
- const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
- uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
- ? PFEC_user_mode : 0) | PFEC_insn_fetch;
- unsigned long addr;
- char sig[5]; /* ud2; .ascii "xen" */
-
- if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
- sizeof(sig), hvm_access_insn_fetch,
- cs, &addr) &&
- (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
- walk, NULL) == HVMTRANS_okay) &&
- (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
- {
- regs->rip += sizeof(sig);
- regs->eflags &= ~X86_EFLAGS_RF;
+ regs->rip += sizeof(sig);
+ regs->eflags &= ~X86_EFLAGS_RF;
- /* Zero the upper 32 bits of %rip if not in 64bit mode. */
- if ( !(hvm_long_mode_active(cur) && cs->l) )
- regs->rip = (uint32_t)regs->rip;
+ /* Zero the upper 32 bits of %rip if not in 64bit mode. */
+ if ( !(hvm_long_mode_active(cur) && cs->l) )
+ regs->rip = (uint32_t)regs->rip;
- add_taint(TAINT_HVM_FEP);
+ add_taint(TAINT_HVM_FEP);
- should_emulate = true;
- }
+ should_emulate = true;
}
if ( !should_emulate )
--
2.43.0
On 12.03.2026 12:21, Alejandro Vallejo wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3837,35 +3837,34 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
> struct vcpu *cur = current;
> bool should_emulate = false;
> struct hvm_emulate_ctxt ctxt;
> + const struct segment_register *cs;
> + uint32_t walk;
> + unsigned long addr;
> + char sig[5]; /* ud2; .ascii "xen" */
>
> hvm_emulate_init_once(&ctxt, NULL, regs);
>
> - if ( opt_hvm_fep )
> + cs = &ctxt.seg_reg[x86_seg_cs];
> + walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> + ? PFEC_user_mode : 0) | PFEC_insn_fetch;
While of course functionally everything's fine this way, I'm now entirely lost:
Why are what were ...
> + if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
> + sizeof(sig), hvm_access_insn_fetch,
> + cs, &addr) &&
> + (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> + walk, NULL) == HVMTRANS_okay) &&
> + (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
> {
> - const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> - uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> - ? PFEC_user_mode : 0) | PFEC_insn_fetch;
... initializers before not initializers anymore, when all you're doing is
(supposedly) re-indentation (and, necessarily, moving decls up to the top of
the scope they need to live in)?
> - unsigned long addr;
> - char sig[5]; /* ud2; .ascii "xen" */
> -
> - if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
> - sizeof(sig), hvm_access_insn_fetch,
> - cs, &addr) &&
> - (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> - walk, NULL) == HVMTRANS_okay) &&
> - (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
> - {
> - regs->rip += sizeof(sig);
> - regs->eflags &= ~X86_EFLAGS_RF;
> + regs->rip += sizeof(sig);
> + regs->eflags &= ~X86_EFLAGS_RF;
>
> - /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> - if ( !(hvm_long_mode_active(cur) && cs->l) )
> - regs->rip = (uint32_t)regs->rip;
> + /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> + if ( !(hvm_long_mode_active(cur) && cs->l) )
> + regs->rip = (uint32_t)regs->rip;
>
> - add_taint(TAINT_HVM_FEP);
> + add_taint(TAINT_HVM_FEP);
>
> - should_emulate = true;
> - }
> + should_emulate = true;
> }
>
> if ( !should_emulate )
With this, the purpose of the should_emulate variable effectively vanishes,
without it actually being purged (unlike you had it earlier).
Jan
On Thu Mar 12, 2026 at 12:42 PM CET, Jan Beulich wrote:
> On 12.03.2026 12:21, Alejandro Vallejo wrote:
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -3837,35 +3837,34 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>> struct vcpu *cur = current;
>> bool should_emulate = false;
>> struct hvm_emulate_ctxt ctxt;
>> + const struct segment_register *cs;
>> + uint32_t walk;
>> + unsigned long addr;
>> + char sig[5]; /* ud2; .ascii "xen" */
>>
>> hvm_emulate_init_once(&ctxt, NULL, regs);
>>
>> - if ( opt_hvm_fep )
>> + cs = &ctxt.seg_reg[x86_seg_cs];
>> + walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
>> + ? PFEC_user_mode : 0) | PFEC_insn_fetch;
>
> While of course functionally everything's fine this way, I'm now entirely lost:
> Why are what were ...
>
>> + if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>> + sizeof(sig), hvm_access_insn_fetch,
>> + cs, &addr) &&
>> + (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
>> + walk, NULL) == HVMTRANS_okay) &&
>> + (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
>> {
>> - const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
>> - uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
>> - ? PFEC_user_mode : 0) | PFEC_insn_fetch;
>
> ... initializers before not initializers anymore, when all you're doing is
> (supposedly) re-indentation (and, necessarily, moving decls up to the top of
> the scope they need to live in)?
walk cannot be initialised at the declaration site because it relies on
hvm_emulate_init_once() having run, and you made a strong argument to preserve
the current form; so I left cs next to walk, as it was before.
>
>> - unsigned long addr;
>> - char sig[5]; /* ud2; .ascii "xen" */
>> -
>> - if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>> - sizeof(sig), hvm_access_insn_fetch,
>> - cs, &addr) &&
>> - (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
>> - walk, NULL) == HVMTRANS_okay) &&
>> - (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
>> - {
>> - regs->rip += sizeof(sig);
>> - regs->eflags &= ~X86_EFLAGS_RF;
>> + regs->rip += sizeof(sig);
>> + regs->eflags &= ~X86_EFLAGS_RF;
>>
>> - /* Zero the upper 32 bits of %rip if not in 64bit mode. */
>> - if ( !(hvm_long_mode_active(cur) && cs->l) )
>> - regs->rip = (uint32_t)regs->rip;
>> + /* Zero the upper 32 bits of %rip if not in 64bit mode. */
>> + if ( !(hvm_long_mode_active(cur) && cs->l) )
>> + regs->rip = (uint32_t)regs->rip;
>>
>> - add_taint(TAINT_HVM_FEP);
>> + add_taint(TAINT_HVM_FEP);
>>
>> - should_emulate = true;
>> - }
>> + should_emulate = true;
>> }
>>
>> if ( !should_emulate )
>
> With this, the purpose of the should_emulate variable effectively vanishes,
> without it actually being purged (unlike you had it earlier).
That goes away with patch 6, that I neglected to send before.
>
> Jan
© 2016 - 2026 Red Hat, Inc.