From: Ira Weiny <ira.weiny@intel.com>
Auxiliary pt_regs space needs to be manipulated by the generic
entry/exit code.
Ideally irqentry_exit() would take care of handling any auxiliary
pt_regs on exit. Unfortunately, irqentry_exit() is not the only exit
from exception path. The call to irqentry_exit_cond_resched() from
xen_pv_evtchn_do_upcall() bypasses irqentry_exit().
Make irqentry_exit_cond_resched() symmetrical with irqentry_enter() by
passing pt_regs to it. This makes irqentry_exit_cond_resched() capable
of handling auxiliary pt_regs in future patches.
Cc: Rik van Riel <riel@surriel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
Forward ported from PKS series:
https://lore.kernel.org/lkml/20220419170649.1022246-19-ira.weiny@intel.com/
---
arch/arm64/include/asm/preempt.h | 2 +-
arch/arm64/kernel/entry-common.c | 4 ++--
arch/x86/entry/common.c | 2 +-
include/linux/entry-common.h | 17 ++++++++------
kernel/entry/common.c | 13 +++++++----
kernel/sched/core.c | 40 ++++++++++++++++----------------
6 files changed, 43 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..bd185a214096 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -87,7 +87,7 @@ void preempt_schedule_notrace(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched_internal);
void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index c75ca36b4a49..a1cc8795b729 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -224,9 +224,9 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
}
#ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched_internal);
#define need_irq_preemption() \
- (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+ (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched_internal))
#else
#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
#endif
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 6c2826417b33..f1ba770d035d 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -309,7 +309,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- irqentry_exit_cond_resched();
+ irqentry_exit_cond_resched(regs);
instrumentation_end();
restore_inhcall(inhcall);
} else {
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 84a466b176cf..976cce7cf803 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -412,23 +412,26 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
/**
* irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ * @regs: Pointer to pt_regs of interrupted context
*
* Conditional reschedule with additional sanity checks.
*/
+void irqentry_exit_cond_resched(struct pt_regs *regs);
+
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
-#define irqentry_exit_cond_resched_dynamic_disabled NULL
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
-#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#define irqentry_exit_cond_resched_internal_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_internal_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched_internal, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched_internal() static_call(irqentry_exit_cond_resched_internal)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched_internal);
void dynamic_irqentry_exit_cond_resched(void);
-#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#define irqentry_exit_cond_resched_internal() dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
-#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#define irqentry_exit_cond_resched_internal() raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */
/**
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 063068a9ea9b..8c0f334c4b75 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -387,18 +387,23 @@ void raw_irqentry_exit_cond_resched(void)
}
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched_internal, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched_internal);
void dynamic_irqentry_exit_cond_resched(void)
{
- if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+ if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched_internal))
return;
raw_irqentry_exit_cond_resched();
}
#endif
#endif
+void irqentry_exit_cond_resched(struct pt_regs *regs)
+{
+ irqentry_exit_cond_resched_internal();
+}
+
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
lockdep_assert_irqs_disabled();
@@ -425,7 +430,7 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
instrumentation_begin();
if (IS_ENABLED(CONFIG_PREEMPTION))
- irqentry_exit_cond_resched();
+ irqentry_exit_cond_resched_internal();
/* Covers both tracing and lockdep */
trace_hardirqs_on();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 189999007f32..38dd74ba1c86 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8419,29 +8419,29 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
* SC:might_resched
* SC:preempt_schedule
* SC:preempt_schedule_notrace
- * SC:irqentry_exit_cond_resched
+ * SC:irqentry_exit_cond_resched_internal
*
*
* NONE:
- * cond_resched <- __cond_resched
- * might_resched <- RET0
- * preempt_schedule <- NOP
- * preempt_schedule_notrace <- NOP
- * irqentry_exit_cond_resched <- NOP
+ * cond_resched <- __cond_resched
+ * might_resched <- RET0
+ * preempt_schedule <- NOP
+ * preempt_schedule_notrace <- NOP
+ * irqentry_exit_cond_resched_internal <- NOP
*
* VOLUNTARY:
- * cond_resched <- __cond_resched
- * might_resched <- __cond_resched
- * preempt_schedule <- NOP
- * preempt_schedule_notrace <- NOP
- * irqentry_exit_cond_resched <- NOP
+ * cond_resched <- __cond_resched
+ * might_resched <- __cond_resched
+ * preempt_schedule <- NOP
+ * preempt_schedule_notrace <- NOP
+ * irqentry_exit_cond_resched_internal <- NOP
*
* FULL:
- * cond_resched <- RET0
- * might_resched <- RET0
- * preempt_schedule <- preempt_schedule
- * preempt_schedule_notrace <- preempt_schedule_notrace
- * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ * cond_resched <- RET0
+ * might_resched <- RET0
+ * preempt_schedule <- preempt_schedule
+ * preempt_schedule_notrace <- preempt_schedule_notrace
+ * irqentry_exit_cond_resched_internal <- irqentry_exit_cond_resched_internal
*/
enum {
@@ -8487,7 +8487,7 @@ void sched_dynamic_update(int mode)
preempt_dynamic_enable(might_resched);
preempt_dynamic_enable(preempt_schedule);
preempt_dynamic_enable(preempt_schedule_notrace);
- preempt_dynamic_enable(irqentry_exit_cond_resched);
+ preempt_dynamic_enable(irqentry_exit_cond_resched_internal);
switch (mode) {
case preempt_dynamic_none:
@@ -8495,7 +8495,7 @@ void sched_dynamic_update(int mode)
preempt_dynamic_disable(might_resched);
preempt_dynamic_disable(preempt_schedule);
preempt_dynamic_disable(preempt_schedule_notrace);
- preempt_dynamic_disable(irqentry_exit_cond_resched);
+ preempt_dynamic_disable(irqentry_exit_cond_resched_internal);
pr_info("Dynamic Preempt: none\n");
break;
@@ -8504,7 +8504,7 @@ void sched_dynamic_update(int mode)
preempt_dynamic_enable(might_resched);
preempt_dynamic_disable(preempt_schedule);
preempt_dynamic_disable(preempt_schedule_notrace);
- preempt_dynamic_disable(irqentry_exit_cond_resched);
+ preempt_dynamic_disable(irqentry_exit_cond_resched_internal);
pr_info("Dynamic Preempt: voluntary\n");
break;
@@ -8513,7 +8513,7 @@ void sched_dynamic_update(int mode)
preempt_dynamic_disable(might_resched);
preempt_dynamic_enable(preempt_schedule);
preempt_dynamic_enable(preempt_schedule_notrace);
- preempt_dynamic_enable(irqentry_exit_cond_resched);
+ preempt_dynamic_enable(irqentry_exit_cond_resched_internal);
pr_info("Dynamic Preempt: full\n");
break;
}
--
2.35.3
On Fri, Aug 05, 2022 at 10:30:05AM -0700, ira.weiny@intel.com wrote:
> From: Ira Weiny <ira.weiny@intel.com>
>
> Auxiliary pt_regs space needs to be manipulated by the generic
> entry/exit code.
>
> Ideally irqentry_exit() would take care of handling any auxiliary
> pt_regs on exit. Unfortunately, irqentry_exit() is not the only exit
> from exception path. The call to irqentry_exit_cond_resched() from
> xen_pv_evtchn_do_upcall() bypasses irqentry_exit().
>
> Make irqentry_exit_cond_resched() symmetrical with irqentry_enter() by
> passing pt_regs to it. This makes irqentry_exit_cond_resched() capable
> of handling auxiliary pt_regs in future patches.
>
> Cc: Rik van Riel <riel@surriel.com>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
>
> ---
> Forward ported from PKS series:
> https://lore.kernel.org/lkml/20220419170649.1022246-19-ira.weiny@intel.com/
> ---
> arch/arm64/include/asm/preempt.h | 2 +-
> arch/arm64/kernel/entry-common.c | 4 ++--
> arch/x86/entry/common.c | 2 +-
> include/linux/entry-common.h | 17 ++++++++------
> kernel/entry/common.c | 13 +++++++----
> kernel/sched/core.c | 40 ++++++++++++++++----------------
> 6 files changed, 43 insertions(+), 35 deletions(-)
Why all this churn?
Why can't you add a parameter to irqentry_exit():
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state, bool cond_resched);
and then have all callers except xen_pv_evtchn_do_upcall() pass in false
and this way have all exit paths end up in irqentry_exit()?
And, ofc, move the true case which is the body of
raw_irqentry_exit_cond_resched() to irqentry_exit() and then get rid of
former.
xen_pv_evtchn_do_upcall() will, ofc, do:
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
irqentry_exit(regs, state, true);
instrumentation_end();
restore_inhcall(inhcall);
} else {
instrumentation_end();
irqentry_exit(regs, state, false);
Hmmm?
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Mon, Aug 08 2022 at 12:38, Borislav Petkov wrote:
> On Fri, Aug 05, 2022 at 10:30:05AM -0700, ira.weiny@intel.com wrote:
>> ---
>> arch/arm64/include/asm/preempt.h | 2 +-
>> arch/arm64/kernel/entry-common.c | 4 ++--
>> arch/x86/entry/common.c | 2 +-
>> include/linux/entry-common.h | 17 ++++++++------
>> kernel/entry/common.c | 13 +++++++----
>> kernel/sched/core.c | 40 ++++++++++++++++----------------
>> 6 files changed, 43 insertions(+), 35 deletions(-)
>
> Why all this churn?
>
> Why can't you add a parameter to irqentry_exit():
>
> noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state, bool cond_resched);
>
> and then have all callers except xen_pv_evtchn_do_upcall() pass in false
> and this way have all exit paths end up in irqentry_exit()?
>
> And, ofc, move the true case which is the body of
> raw_irqentry_exit_cond_resched() to irqentry_exit() and then get rid of
> former.
>
> xen_pv_evtchn_do_upcall() will, ofc, do:
>
> if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
> irqentry_exit(regs, state, true);
> instrumentation_end();
> restore_inhcall(inhcall);
> } else {
> instrumentation_end();
> irqentry_exit(regs, state, false);
>
How is that less churn? Care to do:
git grep 'irqentry_exit(' arch/
The real question is:
Why is it required that irqentry_exit_cond_resched() handles any of
the auxiliary pt regs space?
That's completely unanswered by the changelog and absolutely irrelevant
for the problem at hand, i.e. storing the CPU number on irq/exception
entry.
So why is this patch in this series at all?
But even for future purposes it is more than questionable. Why?
Contrary to the claim of the changelog xen_pv_evtchn_do_upcall() is not
really a bypass of irqentry_exit(). The point is:
The hypercall is issued by the kernel from privcmd_ioctl_hypercall()
which does:
xen_preemptible_hcall_begin();
hypercall();
xen_preemptible_hcall_end();
So any upcall from the hypervisor to the guest will semantically hit
regular interrupt enabled guest kernel space which means that if that
code would call irqentry_exit() then it would run through the kernel
exit code path:
} else if (!regs_irqs_disabled(regs)) {
instrumentation_begin();
if (IS_ENABLED(CONFIG_PREEMPTION))
irqentry_exit_cond_resched();
/* Covers both tracing and lockdep */
trace_hardirqs_on();
instrumentation_end();
} ....
Which would fail to invoke irqentry_exit_cond_resched() if
CONFIG_PREEMPTION=n. But the whole point of this exercise is to allow
preemption from the upcall even when the kernel has CONFIG_PREEMPTION=n.
Staring at this XENPV code there are two issues:
1) That code lacks a trace_hardirqs_on() after the call to
irqentry_exit_cond_resched(). My bad.
2) CONFIG_PREEMPT_DYNAMIC broke this mechanism.
If the static call/key is disabled then the call becomes a NOP.
Frederic?
Both clearly demonstrate how well tested this XEN_PV muck is which means
we should just delete it.
Anyway. This wants the fix below.
If there is still a need to do anything about this XEN cond_resched()
muck for the PREEMPTION=n case for future auxregs usage then this can be
simply hidden in this new XEN helper and does not need any change to the
rest of the code.
I doubt that this is required, but if so then there needs to be a very
concise explanation and not just incomprehensible hand waving word
salad.
Thanks,
tglx
---
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -283,9 +283,18 @@ static __always_inline void restore_inhc
{
__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
+
+static __always_inline void xenpv_irqentry_exit_cond_resched(void)
+{
+ instrumentation_begin();
+ raw_irqentry_exit_cond_resched();
+ trace_hardirqs_on();
+ instrumentation_end();
+}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
+static __always_inline void xenpv_irqentry_exit_cond_resched(void) { }
#endif
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
@@ -306,11 +315,11 @@ static void __xen_pv_evtchn_do_upcall(st
instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+ instrumentation_end();
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- irqentry_exit_cond_resched();
- instrumentation_end();
+ xenpv_irqentry_exit_cond_resched();
restore_inhcall(inhcall);
} else {
instrumentation_end();
On Wed, Aug 10 2022 at 01:18, Thomas Gleixner wrote:
> Both clearly demonstrate how well tested this XEN_PV muck is which means
> we should just delete it.
>
> Anyway. This wants the fix below.
which is not sufficient. If CONFIG_PREEMPT_DYNAMIC is set then
CONFIG_PREEMPTION is set too, which makes the preemptible hypercall magic
a complete NOP. So if the dynamic call is disabled, then such a several
seconds (sic!) hypercall cannot be preempted at all.
Something like the below then. Oh well...
Thanks,
tglx
---
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -253,7 +253,8 @@ SYSCALL_DEFINE0(ni_syscall)
}
#ifdef CONFIG_XEN_PV
-#ifndef CONFIG_PREEMPTION
+
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
/*
* Some hypercalls issued by the toolstack can take many 10s of
* seconds. Allow tasks running hypercalls via the privcmd driver to
@@ -283,9 +284,18 @@ static __always_inline void restore_inhc
{
__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
+
+static __always_inline void xenpv_irqentry_exit_cond_resched(void)
+{
+ instrumentation_begin();
+ raw_irqentry_exit_cond_resched();
+ trace_hardirqs_on();
+ instrumentation_end();
+}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
+static __always_inline void xenpv_irqentry_exit_cond_resched(void) { }
#endif
static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
@@ -306,11 +316,11 @@ static void __xen_pv_evtchn_do_upcall(st
instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+ instrumentation_end();
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- irqentry_exit_cond_resched();
- instrumentation_end();
+ xenpv_irqentry_exit_cond_resched();
restore_inhcall(inhcall);
} else {
instrumentation_end();
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -416,6 +416,7 @@ irqentry_state_t noinstr irqentry_enter(
* Conditional reschedule with additional sanity checks.
*/
void raw_irqentry_exit_cond_resched(void);
+
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -194,7 +194,7 @@ bool xen_running_on_version_or_later(uns
void xen_efi_runtime_setup(void);
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)
+#if defined(CONFIG_XEN_PV) && (!defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC))
DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -385,6 +385,7 @@ void raw_irqentry_exit_cond_resched(void
preempt_schedule_irq();
}
}
+
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
On Mon, Aug 08, 2022 at 12:38:08PM +0200, Borislav Petkov wrote:
> On Fri, Aug 05, 2022 at 10:30:05AM -0700, ira.weiny@intel.com wrote:
> > From: Ira Weiny <ira.weiny@intel.com>
> >
> > Auxiliary pt_regs space needs to be manipulated by the generic
> > entry/exit code.
> >
> > Ideally irqentry_exit() would take care of handling any auxiliary
> > pt_regs on exit. Unfortunately, irqentry_exit() is not the only exit
> > from exception path. The call to irqentry_exit_cond_resched() from
> > xen_pv_evtchn_do_upcall() bypasses irqentry_exit().
> >
> > Make irqentry_exit_cond_resched() symmetrical with irqentry_enter() by
> > passing pt_regs to it. This makes irqentry_exit_cond_resched() capable
> > of handling auxiliary pt_regs in future patches.
> >
> > Cc: Rik van Riel <riel@surriel.com>
> > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > Cc: Borislav Petkov <bp@alien8.de>
> > Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> >
> > ---
> > Forward ported from PKS series:
> > https://lore.kernel.org/lkml/20220419170649.1022246-19-ira.weiny@intel.com/
> > ---
> > arch/arm64/include/asm/preempt.h | 2 +-
> > arch/arm64/kernel/entry-common.c | 4 ++--
> > arch/x86/entry/common.c | 2 +-
> > include/linux/entry-common.h | 17 ++++++++------
> > kernel/entry/common.c | 13 +++++++----
> > kernel/sched/core.c | 40 ++++++++++++++++----------------
> > 6 files changed, 43 insertions(+), 35 deletions(-)
>
> Why all this churn?
>
> Why can't you add a parameter to irqentry_exit():
>
> noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state, bool cond_resched);
I thought about that but generally have been steered away from using bool
arguments like this.
>
> and then have all callers except xen_pv_evtchn_do_upcall() pass in false
> and this way have all exit paths end up in irqentry_exit()?
>
> And, ofc, move the true case which is the body of
> raw_irqentry_exit_cond_resched() to irqentry_exit() and then get rid of
> former.
Looking this over I think this may be a reasonable use of a flag like that.
I'm not sure if this series is going to land given the discussion of
performance. Would this clean up be welcome on its own? I'm willing to respin
this patch and submit if so.
Ira
>
> xen_pv_evtchn_do_upcall() will, ofc, do:
>
> if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
> irqentry_exit(regs, state, true);
> instrumentation_end();
> restore_inhcall(inhcall);
> } else {
> instrumentation_end();
> irqentry_exit(regs, state, false);
>
> Hmmm?
>
> --
> Regards/Gruss,
> Boris.
>
> https://people.kernel.org/tglx/notes-about-netiquette
On Mon, Aug 08, 2022 at 10:34:19AM -0700, Ira Weiny wrote:
> I thought about that but generally have been steered away from using bool
> arguments like this.
The reason being?
> Looking this over I think this may be a reasonable use of a flag like
> that.
>
> I'm not sure if this series is going to land given the discussion of
> performance.
Well, the only way to talk performance is to have something to measure
it with. But let me finish going through the rest of your set first.
> Would this clean up be welcome on its own? I'm willing to respin
> this patch and submit if so.
Depends on who steered you away from the bool thing and why?
:-)
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On 8/8/22 10:38, Borislav Petkov wrote:
> On Mon, Aug 08, 2022 at 10:34:19AM -0700, Ira Weiny wrote:
>> I thought about that but generally have been steered away from using bool
>> arguments like this.
> The reason being?
Might have been me. Function calls that look like this:
foo(&ptr, false, true, false, true, 1, 0);
are incomprehensible. A true/false is effectively a magic number here
and you have to go looking at the code implementing 'foo()' or at least
the declaration hoping that the variable names help (if the declaration
has variable names).
I think I've encouraged Ira to do something like this instead:
enum foo_mode {
MODE_BAR,
MODE_BAZ
}
where the call ends up looking like:
foo(&ptr, MODE_BAR);
which is much more self-documenting.
On Mon, Aug 08, 2022 at 10:43:35AM -0700, Dave Hansen wrote:
> Might have been me. Function calls that look like this:
>
> foo(&ptr, false, true, false, true, 1, 0);
>
> are incomprehensible. A true/false is effectively a magic number here
> and you have to go looking at the code implementing 'foo()' or at least
> the declaration hoping that the variable names help (if the declaration
> has variable names).
Yap, agreed.
It would start getting on my nerves after the second bool. :)
> I think I've encouraged Ira to do something like this instead:
>
> enum foo_mode {
> MODE_BAR,
> MODE_BAZ
> }
>
> where the call ends up looking like:
>
> foo(&ptr, MODE_BAR);
>
> which is much more self-documenting.
Yap, that's much better.
I suggested the bool thing in thinking that this would be the only
exception to the usage, i.e., a one-off thing. I probably should talk
to Jürgen whether we even need this one-off thing and maybe solve it
differently.
Thx.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Fri, 2022-08-05 at 10:30 -0700, ira.weiny@intel.com wrote:
> From: Ira Weiny <ira.weiny@intel.com>
>
> Auxiliary pt_regs space needs to be manipulated by the generic
> entry/exit code.
>
> Ideally irqentry_exit() would take care of handling any auxiliary
> pt_regs on exit. Unfortunately, irqentry_exit() is not the only exit
> from exception path. The call to irqentry_exit_cond_resched() from
> xen_pv_evtchn_do_upcall() bypasses irqentry_exit().
>
> Make irqentry_exit_cond_resched() symmetrical with irqentry_enter()
> by
> passing pt_regs to it. This makes irqentry_exit_cond_resched()
> capable
> of handling auxiliary pt_regs in future patches.
>
> Cc: Rik van Riel <riel@surriel.com>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>

Acked-by: Rik van Riel <riel@surriel.com>

--
All Rights Reversed.
© 2016 - 2026 Red Hat, Inc.