From: Ashish Kalra <ashish.kalra@amd.com>
Add checks to the interrupt exit code paths that return to user mode:
if the #HV handler is currently executing, do not follow the
irqentry_exit_to_user_mode() path, as that can cause the #HV handler
to be preempted and rescheduled on another CPU. An #HV handler
rescheduled on another CPU would handle interrupts on a different CPU
than the one they were injected on, causing invalid EOIs, missed or
lost guest interrupts, the corresponding hangs, and/or per-CPU IRQs
handled on an unintended CPU.
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
arch/x86/include/asm/idtentry.h | 66 +++++++++++++++++++++++++++++++++
arch/x86/kernel/sev.c | 30 +++++++++++++++
2 files changed, 96 insertions(+)
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 652fea10d377..45b47132be7c 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -13,6 +13,10 @@
#include <asm/irq_stack.h>
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state);
+#endif
+
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
* No error code pushed by hardware
@@ -176,6 +180,7 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
#define DECLARE_IDTENTRY_IRQ(vector, func) \
DECLARE_IDTENTRY_ERRORCODE(vector, func)
+#ifndef CONFIG_AMD_MEM_ENCRYPT
/**
* DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
* @func: Function name of the entry point
@@ -205,6 +210,26 @@ __visible noinstr void func(struct pt_regs *regs, \
} \
\
static noinline void __##func(struct pt_regs *regs, u32 vector)
+#else
+
+#define DEFINE_IDTENTRY_IRQ(func) \
+static void __##func(struct pt_regs *regs, u32 vector); \
+ \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code) \
+{ \
+ irqentry_state_t state = irqentry_enter(regs); \
+ u32 vector = (u32)(u8)error_code; \
+ \
+ instrumentation_begin(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ run_irq_on_irqstack_cond(__##func, regs, vector); \
+ instrumentation_end(); \
+ irqentry_exit_hv_cond(regs, state); \
+} \
+ \
+static noinline void __##func(struct pt_regs *regs, u32 vector)
+#endif
/**
* DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
@@ -221,6 +246,7 @@ static noinline void __##func(struct pt_regs *regs, u32 vector)
#define DECLARE_IDTENTRY_SYSVEC(vector, func) \
DECLARE_IDTENTRY(vector, func)
+#ifndef CONFIG_AMD_MEM_ENCRYPT
/**
* DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
* @func: Function name of the entry point
@@ -245,6 +271,26 @@ __visible noinstr void func(struct pt_regs *regs) \
} \
\
static noinline void __##func(struct pt_regs *regs)
+#else
+
+#define DEFINE_IDTENTRY_SYSVEC(func) \
+static void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ irqentry_state_t state = irqentry_enter(regs); \
+ \
+ instrumentation_begin(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ run_sysvec_on_irqstack_cond(__##func, regs); \
+ instrumentation_end(); \
+ irqentry_exit_hv_cond(regs, state); \
+} \
+ \
+static noinline void __##func(struct pt_regs *regs)
+#endif
+
+#ifndef CONFIG_AMD_MEM_ENCRYPT
/**
* DEFINE_IDTENTRY_SYSVEC_SIMPLE - Emit code for simple system vector IDT
@@ -274,6 +320,26 @@ __visible noinstr void func(struct pt_regs *regs) \
} \
\
static __always_inline void __##func(struct pt_regs *regs)
+#else
+
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
+static __always_inline void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ irqentry_state_t state = irqentry_enter(regs); \
+ \
+ instrumentation_begin(); \
+ __irq_enter_raw(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ __##func(regs); \
+ __irq_exit_raw(); \
+ instrumentation_end(); \
+ irqentry_exit_hv_cond(regs, state); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs)
+#endif
/**
* DECLARE_IDTENTRY_XENCB - Declare functions for XEN HV callback entry point
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index b1a98c2a52f8..23f15e95838b 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -147,6 +147,10 @@ struct sev_hv_doorbell_page {
struct sev_snp_runtime_data {
struct sev_hv_doorbell_page hv_doorbell_page;
+ /*
+ * Indication that we are currently handling #HV events.
+ */
+ bool hv_handling_events;
};
static DEFINE_PER_CPU(struct sev_snp_runtime_data*, snp_runtime_data);
@@ -200,6 +204,8 @@ static void do_exc_hv(struct pt_regs *regs)
union hv_pending_events pending_events;
u8 vector;
+ this_cpu_read(snp_runtime_data)->hv_handling_events = true;
+
while (sev_hv_pending()) {
pending_events.events = xchg(
&sev_snp_current_doorbell_page()->pending_events.events,
@@ -234,6 +240,8 @@ static void do_exc_hv(struct pt_regs *regs)
common_interrupt(regs, pending_events.vector);
}
}
+
+ this_cpu_read(snp_runtime_data)->hv_handling_events = false;
}
static __always_inline bool on_vc_stack(struct pt_regs *regs)
@@ -2529,3 +2537,25 @@ static int __init snp_init_platform_device(void)
return 0;
}
device_initcall(snp_init_platform_device);
+
+noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state)
+{
+ /*
+ * Check whether this exception returns to user mode. If so,
+ * and if the #HV handler is currently executing, do not
+ * follow the irqentry_exit_to_user_mode() path, as that can
+ * cause the #HV handler to be preempted and rescheduled on
+ * another CPU. An #HV handler rescheduled on another CPU
+ * would handle interrupts on a different CPU than the one
+ * they were injected on, causing invalid EOIs, missed or
+ * lost guest interrupts, the corresponding hangs, and/or
+ * per-CPU IRQs handled on an unintended CPU.
+ */
+ if (user_mode(regs) &&
+ this_cpu_read(snp_runtime_data)->hv_handling_events)
+ return;
+
+ /* follow normal interrupt return/exit path */
+ irqentry_exit(regs, state);
+}
--
2.25.1
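
To see concretely what the idtentry changes do, here is roughly what
the CONFIG_AMD_MEM_ENCRYPT variant of DEFINE_IDTENTRY_SYSVEC in this
patch expands to for an existing user such as
sysvec_apic_timer_interrupt (picked purely as an illustration); the
body mirrors the generic definition, with only the exit call swapped:

static void __sysvec_apic_timer_interrupt(struct pt_regs *regs);

__visible noinstr void sysvec_apic_timer_interrupt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	kvm_set_cpu_l1tf_flush_l1d();
	run_sysvec_on_irqstack_cond(__sysvec_apic_timer_interrupt, regs);
	instrumentation_end();

	/*
	 * The only change from the generic definition: the exit goes
	 * through irqentry_exit_hv_cond() instead of irqentry_exit().
	 */
	irqentry_exit_hv_cond(regs, state);
}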
On 1/22/2023 3:46 AM, Tianyu Lan wrote:
> From: Ashish Kalra <ashish.kalra@amd.com>
>
> [full patch quoted; trimmed]
>
> @@ -2529,3 +2537,25 @@ static int __init snp_init_platform_device(void)
>   	return 0;
>   }
>   device_initcall(snp_init_platform_device);
> +
> +noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state)
> +{

This code path is being called even for a guest without SNP. Ran a SEV
guest and the guest crashed in this code path. Checking & returning made
the (non-SNP) guest boot, with some call traces. But this branch needs
to be avoided for non-SNP guests, and for the host as well:

+++ b/arch/x86/kernel/sev.c
@@ -2540,6 +2540,9 @@ device_initcall(snp_init_platform_device);

 noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state)
 {
+
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;

Thanks,
Pankaj
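
A minimal sketch (not from the posted series) of how that guard might
be folded into irqentry_exit_hv_cond(); unlike the bare check-and-return
above, the non-SNP case here falls through to irqentry_exit() so the
normal exit work still runs, which is presumably what the quick hack's
call traces point at:

noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state)
{
	/*
	 * Only SEV-SNP guests set up snp_runtime_data; check the
	 * platform attribute first so non-SNP guests and the host
	 * never dereference the per-CPU pointer.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
	    user_mode(regs) &&
	    this_cpu_read(snp_runtime_data)->hv_handling_events)
		return;

	/* Everyone else follows the normal interrupt return/exit path. */
	irqentry_exit(regs, state);
}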
On 2/22/2023 12:44 AM, Gupta, Pankaj wrote:
>> +noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state)
>> +{
>
> This code path is being called even for a guest without SNP. Ran a SEV
> guest and the guest crashed in this code path. Checking & returning made
> the (non-SNP) guest boot, with some call traces. But this branch needs
> to be avoided for non-SNP guests, and for the host as well.

Nice catch! I will fix it in the next version. Thanks.
On Sat, 21 Jan 2023 21:46:06 -0500 Tianyu Lan <ltykernel@gmail.com> wrote:

> From: Ashish Kalra <ashish.kalra@amd.com>
>
> Add checks to the interrupt exit code paths that return to user mode:
> if the #HV handler is currently executing, do not follow the
> irqentry_exit_to_user_mode() path, as that can cause the #HV handler
> to be preempted and rescheduled on another CPU. An #HV handler
> rescheduled on another CPU would handle interrupts on a different CPU
> than the one they were injected on, causing invalid EOIs, missed or
> lost guest interrupts, the corresponding hangs, and/or per-CPU IRQs
> handled on an unintended CPU.

Why doesn't this problem happen in the #VC handler? The #VC handler
doesn't have this special handling.

> [rest of patch quoted; trimmed]
On 2/2/2023 5:20 PM, Zhi Wang wrote:
> On Sat, 21 Jan 2023 21:46:06 -0500 Tianyu Lan <ltykernel@gmail.com> wrote:
>
>> [commit message quoted; trimmed]
>
> Why doesn't this problem happen in the #VC handler? The #VC handler
> doesn't have this special handling.

Because the #VC handler does not invoke the common_interrupt() handler
to do IRQ processing. IRQ handling is specific to the #HV exception
handler: once restricted interrupt injection support is enabled, all
guest interrupt handling is invoked from the #HV exception handler.

Thanks,
Ashish
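
To make the contrast concrete, here is a condensed view of the dispatch
loop from the do_exc_hv() hunks in this patch (vector decode and
special-vector handling elided; see the full hunks above). The #VC
handler has no equivalent loop, so it never re-enters the IRQ machinery
and does not need the reschedule guard:

static void do_exc_hv(struct pt_regs *regs)
{
	union hv_pending_events pending_events;

	/*
	 * Mark this CPU as inside the #HV handler;
	 * irqentry_exit_hv_cond() keys off this flag to suppress the
	 * user-mode exit path.
	 */
	this_cpu_read(snp_runtime_data)->hv_handling_events = true;

	while (sev_hv_pending()) {
		/* Atomically claim pending events from the doorbell page. */
		pending_events.events = xchg(
			&sev_snp_current_doorbell_page()->pending_events.events,
			0);

		/* ... vector decode and special-vector handling elided ... */

		/*
		 * Guest IRQs are dispatched from here; this is the call
		 * the #VC handler never makes.
		 */
		common_interrupt(regs, pending_events.vector);
	}

	this_cpu_read(snp_runtime_data)->hv_handling_events = false;
}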