Similar to the IRQ tracepoints, the preempt tracepoints are typically
disabled in production systems due to the significant overhead they
introduce even when not in use.
The overhead primarily comes from two sources: First, when tracepoints
are compiled into the kernel, preempt_count_add() and preempt_count_sub()
become external function calls rather than inlined operations. Second,
these functions perform unnecessary preempt_count() checks even when the
tracepoint itself is disabled.
This optimization introduces an early check of the tracepoint static key,
which allows us to skip both the function call overhead and the redundant
preempt_count() checks when tracing is disabled. The change maintains all
existing functionality when tracing is active while significantly
reducing overhead for the common case where tracing is inactive.
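To illustrate, here is a simplified sketch of the disable side for the
!CONFIG_DEBUG_PREEMPT, CONFIG_TRACE_PREEMPT_TOGGLE=y case (the full version
is in the include/linux/preempt.h hunk below; the DEBUG_PREEMPT and
fully-disabled configurations are unchanged):

	static inline void preempt_count_add(int val)
	{
		__preempt_count_add(val);	/* stays a single inline operation */

		/* Static key: patched to a NOP while the tracepoint is disabled,
		 * so both the preempt_count() compare and the call are skipped. */
		if (tracepoint_enabled(preempt_disable) && preempt_count() == val)
			__trace_preempt_off();	/* out-of-line slow path */
	}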
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Clark Williams <williams@redhat.com>
Cc: Gabriele Monaco <gmonaco@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
---
 include/linux/preempt.h         | 35 ++++++++++++++++++++++++++++++---
 kernel/sched/core.c             | 12 +----------
 kernel/trace/trace_preemptirq.c | 19 ++++++++++++++++++
 3 files changed, 52 insertions(+), 14 deletions(-)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index b0af8d4ef6e6..d13c755cd934 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <linux/cleanup.h>
#include <linux/types.h>
+#include <linux/tracepoint-defs.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -191,17 +192,45 @@ static __always_inline unsigned char interrupt_context_level(void)
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+#if defined(CONFIG_DEBUG_PREEMPT)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() \
- ({ preempt_count_sub(1); should_resched(0); })
+#elif defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+extern void __trace_preempt_on(void);
+extern void __trace_preempt_off(void);
+
+DECLARE_TRACEPOINT(preempt_enable);
+DECLARE_TRACEPOINT(preempt_disable);
+
+#define __preempt_trace_enabled(type) \
+ (tracepoint_enabled(preempt_##type) && preempt_count() == val)
+
+static inline void preempt_count_add(int val)
+{
+ __preempt_count_add(val);
+
+ if (__preempt_trace_enabled(disable))
+ __trace_preempt_off();
+}
+
+static inline void preempt_count_sub(int val)
+{
+ if (__preempt_trace_enabled(enable))
+ __trace_preempt_on();
+
+ __preempt_count_sub(val);
+}
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+#define preempt_count_dec_and_test() \
+ ({ preempt_count_sub(1); should_resched(0); })
+#endif
+
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8988d38d46a3..4feba4738d79 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5840,8 +5840,7 @@ static inline void sched_tick_start(int cpu) { }
static inline void sched_tick_stop(int cpu) { }
#endif
-#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
- defined(CONFIG_TRACE_PREEMPT_TOGGLE))
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_DEBUG_PREEMPT)
/*
* If the value passed in is equal to the current preempt count
* then we just disabled preemption. Start timing the latency.
@@ -5850,30 +5849,24 @@ static inline void preempt_latency_start(int val)
{
if (preempt_count() == val) {
unsigned long ip = get_lock_parent_ip();
-#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = ip;
-#endif
trace_preempt_off(CALLER_ADDR0, ip);
}
}
void preempt_count_add(int val)
{
-#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
return;
-#endif
__preempt_count_add(val);
-#ifdef CONFIG_DEBUG_PREEMPT
/*
* Spinlock count overflowing soon?
*/
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
-#endif
preempt_latency_start(val);
}
EXPORT_SYMBOL(preempt_count_add);
@@ -5891,7 +5884,6 @@ static inline void preempt_latency_stop(int val)
void preempt_count_sub(int val)
{
-#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
@@ -5903,14 +5895,12 @@ void preempt_count_sub(int val)
if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
!(preempt_count() & PREEMPT_MASK)))
return;
-#endif
preempt_latency_stop(val);
__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
NOKPROBE_SYMBOL(preempt_count_sub);
-
#else
static inline void preempt_latency_start(int val) { }
static inline void preempt_latency_stop(int val) { }
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 90ee65db4516..deb2428b34a2 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -118,6 +118,25 @@ EXPORT_TRACEPOINT_SYMBOL(irq_enable);
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
+#if !defined(CONFIG_DEBUG_PREEMPT)
+EXPORT_SYMBOL(__tracepoint_preempt_disable);
+EXPORT_SYMBOL(__tracepoint_preempt_enable);
+
+void __trace_preempt_on(void)
+{
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+}
+EXPORT_SYMBOL(__trace_preempt_on);
+NOKPROBE_SYMBOL(__trace_preempt_on);
+
+void __trace_preempt_off(void)
+{
+ trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
+}
+EXPORT_SYMBOL(__trace_preempt_off);
+NOKPROBE_SYMBOL(__trace_preempt_off);
+#endif /* !CONFIG_DEBUG_PREEMPT */
+
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
trace(preempt_enable, TP_ARGS(a0, a1));
--
2.50.0
On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:

> +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
> +#define preempt_count_dec_and_test() \
> +	({ preempt_count_sub(1); should_resched(0); })
> +#endif

Also this is terrible. Surely you can do better.
On Mon, Jul 07, 2025 at 01:26:22PM +0200, Peter Zijlstra wrote:
> On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> > +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
> > +#define preempt_count_dec_and_test() \
> > +	({ preempt_count_sub(1); should_resched(0); })
> > +#endif
>
> Also this is terrible. Surely you can do better.
>

Thank you for pointing this out. I'm not sure I've fully understood the
concern here. My understanding was that this logic was pre-existing and
my patch only reorganized it.

I'm clearly missing something. Could you please elaborate a bit on the
issue you've spotted?
On Tue, Jul 08, 2025 at 10:09:45AM -0300, Wander Lairson Costa wrote:
> On Mon, Jul 07, 2025 at 01:26:22PM +0200, Peter Zijlstra wrote:
> > On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> > > +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
> > > +#define preempt_count_dec_and_test() \
> > > +	({ preempt_count_sub(1); should_resched(0); })
> > > +#endif
> >
> > Also this is terrible. Surely you can do better.
> >
>
> Thank you for pointing this out. I'm not sure I've fully understood the
> concern here. My understanding was that this logic was pre-existing and
> my patch only reorganized it.
>
> I'm clearly missing something. Could you please elaborate a bit on the
> issue you've spotted?

The normal (!DEBUG) case uses __preempt_count_dec_and_test(), which is
significantly better.
On Tue, Jul 8, 2025 at 3:47 PM Peter Zijlstra <peterz@infradead.org> wrote:
>

Sorry for the late reply. A mix of PTO + higher priority backlog items.

> On Tue, Jul 08, 2025 at 10:09:45AM -0300, Wander Lairson Costa wrote:
> > On Mon, Jul 07, 2025 at 01:26:22PM +0200, Peter Zijlstra wrote:
> > > On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> > > > +#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
> > > > +#define preempt_count_dec_and_test() \
> > > > +	({ preempt_count_sub(1); should_resched(0); })
> > > > +#endif
> > >
> > > Also this is terrible. Surely you can do better.
> > >
> >
> > Thank you for pointing this out. I'm not sure I've fully understood the
> > concern here. My understanding was that this logic was pre-existing and
> > my patch only reorganized it.
> >
> > I'm clearly missing something. Could you please elaborate a bit on the
> > issue you've spotted?
>
> The normal (!DEBUG) case uses __preempt_count_dec_and_test(), which is
> significantly better.
>

Maybe I am missing something, but my understanding is that this behavior
didn't change. When DEBUG_PREEMPT and TRACE_PREEMPT_TOGGLE are not defined,
__preempt_count_dec_and_test() is used.
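For reference, a recap of the two variants being compared here, taken from
the preempt.h hunk above; the comments are mine and only a sketch:

#if !defined(CONFIG_DEBUG_PREEMPT) && !defined(CONFIG_TRACE_PREEMPT_TOGGLE)
/* Decrement and test stay fused in a single primitive
 * (a single dec-and-branch on x86, as in the asm shown below). */
#define preempt_count_dec_and_test()	__preempt_count_dec_and_test()
#else
/* Decrement and test are split, so the count is effectively
 * re-read for should_resched(0). */
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#endif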
On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> Similar to the IRQ tracepoints, the preempt tracepoints are typically
> disabled in production systems due to the significant overhead they
> introduce even when not in use.
>
> The overhead primarily comes from two sources: First, when tracepoints
> are compiled into the kernel, preempt_count_add() and preempt_count_sub()
> become external function calls rather than inlined operations. Second,
> these functions perform unnecessary preempt_count() checks even when the
> tracepoint itself is disabled.
>
> This optimization introduces an early check of the tracepoint static key,
> which allows us to skip both the function call overhead and the redundant
> preempt_count() checks when tracing is disabled. The change maintains all
> existing functionality when tracing is active while significantly
> reducing overhead for the common case where tracing is inactive.
>

This one in particular I worry about the code gen impact. There are a
*LOT* of preempt_{dis,en}able() sites in the kernel and now they all get
this static branch and call crud on.

We spend significant effort to make preempt_{dis,en}able() as small as
possible.
On Mon, Jul 07, 2025 at 01:20:03PM +0200, Peter Zijlstra wrote:
> On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> > Similar to the IRQ tracepoints, the preempt tracepoints are typically
> > disabled in production systems due to the significant overhead they
> > introduce even when not in use.
> >
> > The overhead primarily comes from two sources: First, when tracepoints
> > are compiled into the kernel, preempt_count_add() and preempt_count_sub()
> > become external function calls rather than inlined operations. Second,
> > these functions perform unnecessary preempt_count() checks even when the
> > tracepoint itself is disabled.
> >
> > This optimization introduces an early check of the tracepoint static key,
> > which allows us to skip both the function call overhead and the redundant
> > preempt_count() checks when tracing is disabled. The change maintains all
> > existing functionality when tracing is active while significantly
> > reducing overhead for the common case where tracing is inactive.
> >
>
> This one in particular I worry about the code gen impact. There are a
> *LOT* of preempt_{dis,en}able() sites in the kernel and now they all get
> this static branch and call crud on.
>
> We spend significant effort to make preempt_{dis,en}able() as small as
> possible.
>

Thank you for the feedback, it's much appreciated. I just want to make sure
I'm on the right track. If I understand your concern correctly, it revolves
around the overhead this patch might introduce - specifically to the binary
size and its effect on the iCache - when the kernel is built with preempt
tracepoints enabled. Is that an accurate summary?
On Tue, Jul 08, 2025 at 09:54:06AM -0300, Wander Lairson Costa wrote:
> On Mon, Jul 07, 2025 at 01:20:03PM +0200, Peter Zijlstra wrote:
> > On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> > > Similar to the IRQ tracepoints, the preempt tracepoints are typically
> > > disabled in production systems due to the significant overhead they
> > > introduce even when not in use.
> > > [...]
> >
> > This one in particular I worry about the code gen impact. There are a
> > *LOT* of preempt_{dis,en}able() sites in the kernel and now they all get
> > this static branch and call crud on.
> >
> > We spend significant effort to make preempt_{dis,en}able() as small as
> > possible.
> >
>
> Thank you for the feedback, it's much appreciated. I just want to make sure
> I'm on the right track. If I understand your concern correctly, it revolves
> around the overhead this patch might introduce - specifically to the binary
> size and its effect on the iCache - when the kernel is built with preempt
> tracepoints enabled. Is that an accurate summary?

Yes, specifically:

preempt_disable()
	incl %gs:__preempt_count


preempt_enable()
	decl %gs:__preempt_count
	jz do_schedule
1:	...

do_schedule:
	call __SCT__preemptible_schedule
	jmp 1


your proposal adds significantly to this.
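For comparison, a rough hand-expansion of what preempt_enable() becomes with
this patch when CONFIG_TRACE_PREEMPT_TOGGLE=y and CONFIG_DEBUG_PREEMPT=n (a
sketch derived from the preempt.h hunk above, not verified against actual
codegen):

	barrier();
	/* inlined preempt_count_sub(1): */
	if (tracepoint_enabled(preempt_enable) &&	/* static branch, NOP while tracing is off */
	    preempt_count() == 1)
		__trace_preempt_on();			/* out-of-line call */
	__preempt_count_sub(1);				/* decl/subl %gs:__preempt_count on x86 */
	/* should_resched(0): a separate compare, no fused dec-and-jz */
	if (unlikely(should_resched(0)))
		__preempt_schedule();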
On Tue, Jul 8, 2025 at 3:54 PM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Tue, Jul 08, 2025 at 09:54:06AM -0300, Wander Lairson Costa wrote:
> > On Mon, Jul 07, 2025 at 01:20:03PM +0200, Peter Zijlstra wrote:
> > > On Fri, Jul 04, 2025 at 02:07:43PM -0300, Wander Lairson Costa wrote:
> > > > Similar to the IRQ tracepoints, the preempt tracepoints are typically
> > > > disabled in production systems due to the significant overhead they
> > > > introduce even when not in use.
> > > > [...]
> > >
> > > This one in particular I worry about the code gen impact. There are a
> > > *LOT* of preempt_{dis,en}able() sites in the kernel and now they all get
> > > this static branch and call crud on.
> > >
> > > We spend significant effort to make preempt_{dis,en}able() as small as
> > > possible.
> > >
> >
> > Thank you for the feedback, it's much appreciated. I just want to make sure
> > I'm on the right track. If I understand your concern correctly, it revolves
> > around the overhead this patch might introduce - specifically to the binary
> > size and its effect on the iCache - when the kernel is built with preempt
> > tracepoints enabled. Is that an accurate summary?
>
> Yes, specifically:
>
> preempt_disable()
> 	incl %gs:__preempt_count
>
>
> preempt_enable()
> 	decl %gs:__preempt_count
> 	jz do_schedule
> 1:	...
>
> do_schedule:
> 	call __SCT__preemptible_schedule
> 	jmp 1
>
>
> your proposal adds significantly to this.
>

Here is a breakdown of the patch's behavior under the different kernel
configurations:

* When DEBUG_PREEMPT is defined, the behavior is identical to the current
  implementation, with calls to preempt_count_add/sub().

* When both DEBUG_PREEMPT and TRACE_PREEMPT_TOGGLE are disabled, the
  generated code is also unchanged.

* The primary change occurs when only TRACE_PREEMPT_TOGGLE is defined. In
  this case, the code uses a static key test instead of a function call.
  As the benchmarks show, this approach is faster when the tracepoints are
  disabled. The main trade-off is that enabling or disabling these
  tracepoints will require the kernel to patch more code locations due to
  the use of static keys.
Resending in case people missed the previous message.

On Fri, Aug 1, 2025 at 10:30 AM Wander Lairson Costa <wander@redhat.com> wrote:
>
> On Tue, Jul 8, 2025 at 3:54 PM Peter Zijlstra <peterz@infradead.org> wrote:
> > [...]
> >
> > your proposal adds significantly to this.
> >

Here is a breakdown of the patch's behavior under the different kernel
configurations:

* When DEBUG_PREEMPT is defined, the behavior is identical to the current
  implementation, with calls to preempt_count_add/sub().

* When both DEBUG_PREEMPT and TRACE_PREEMPT_TOGGLE are disabled, the
  generated code is also unchanged.

* The primary change occurs when only TRACE_PREEMPT_TOGGLE is defined. In
  this case, the code uses a static key test instead of a function call.
  As the benchmarks show, this approach is faster when the tracepoints are
  disabled. The main trade-off is that enabling or disabling these
  tracepoints will require the kernel to patch more code locations due to
  the use of static keys.
Hi Wander,

kernel test robot noticed the following build errors:

[auto build test ERROR on trace/for-next]
[also build test ERROR on tip/sched/core linus/master v6.16-rc5 next-20250704]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Wander-Lairson-Costa/trace-preemptirq-reduce-overhead-of-irq_enable-disable-tracepoints/20250705-011058
base:   https://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace for-next
patch link:    https://lore.kernel.org/r/20250704170748.97632-3-wander%40redhat.com
patch subject: [PATCH v3 2/2] tracing/preemptirq: Optimize preempt_disable/enable() tracepoint overhead
config: m68k-allmodconfig (https://download.01.org/0day-ci/archive/20250707/202507071800.OViM30Fr-lkp@intel.com/config)
compiler: m68k-linux-gcc (GCC) 15.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250707/202507071800.OViM30Fr-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507071800.OViM30Fr-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from include/linux/irqflags.h:18,
                    from arch/m68k/include/asm/atomic.h:6,
                    from include/linux/atomic.h:7,
                    from include/linux/jump_label.h:257,
                    from include/linux/static_key.h:1,
                    from include/linux/tracepoint-defs.h:11,
                    from include/linux/preempt.h:13,
                    from arch/m68k/include/asm/processor.h:11,
                    from include/linux/sched.h:13,
                    from arch/m68k/kernel/asm-offsets.c:15:
   arch/m68k/include/asm/irqflags.h: In function 'arch_local_irq_enable':
>> arch/m68k/include/asm/irqflags.h:44:29: error: implicit declaration of function 'hardirq_count' [-Wimplicit-function-declaration]
      44 |         if (MACH_IS_Q40 || !hardirq_count())
         |                             ^~~~~~~~~~~~~
   make[3]: *** [scripts/Makefile.build:98: arch/m68k/kernel/asm-offsets.s] Error 1
   make[3]: Target 'prepare' not remade because of errors.
   make[2]: *** [Makefile:1274: prepare0] Error 2
   make[2]: Target 'prepare' not remade because of errors.
   make[1]: *** [Makefile:248: __sub-make] Error 2
   make[1]: Target 'prepare' not remade because of errors.
   make: *** [Makefile:248: __sub-make] Error 2
   make: Target 'prepare' not remade because of errors.
vim +/hardirq_count +44 arch/m68k/include/asm/irqflags.h

df9ee29270c11d David Howells 2010-10-07  31  
df9ee29270c11d David Howells 2010-10-07  32  static inline void arch_local_irq_enable(void)
df9ee29270c11d David Howells 2010-10-07  33  {
df9ee29270c11d David Howells 2010-10-07  34  #if defined(CONFIG_COLDFIRE)
df9ee29270c11d David Howells 2010-10-07  35  	asm volatile (
df9ee29270c11d David Howells 2010-10-07  36  		"move %/sr,%%d0 \n\t"
df9ee29270c11d David Howells 2010-10-07  37  		"andi.l #0xf8ff,%%d0 \n\t"
df9ee29270c11d David Howells 2010-10-07  38  		"move %%d0,%/sr \n"
df9ee29270c11d David Howells 2010-10-07  39  		: /* no outputs */
df9ee29270c11d David Howells 2010-10-07  40  		:
df9ee29270c11d David Howells 2010-10-07  41  		: "cc", "%d0", "memory");
df9ee29270c11d David Howells 2010-10-07  42  #else
df9ee29270c11d David Howells 2010-10-07  43  # if defined(CONFIG_MMU)
df9ee29270c11d David Howells 2010-10-07 @44  	if (MACH_IS_Q40 || !hardirq_count())
df9ee29270c11d David Howells 2010-10-07  45  # endif
df9ee29270c11d David Howells 2010-10-07  46  		asm volatile (
df9ee29270c11d David Howells 2010-10-07  47  			"andiw %0,%%sr"
df9ee29270c11d David Howells 2010-10-07  48  			:
df9ee29270c11d David Howells 2010-10-07  49  			: "i" (ALLOWINT)
df9ee29270c11d David Howells 2010-10-07  50  			: "memory");
df9ee29270c11d David Howells 2010-10-07  51  #endif
df9ee29270c11d David Howells 2010-10-07  52  }
df9ee29270c11d David Howells 2010-10-07  53  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki