kernel/kprobes.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-)
Commit a189d0350f387 ("kprobes: disable preempt for module_text_address() and kernel_text_address()")
introduced a preempt_disable() region to protect against concurrent
module unloading. However this region also includes the call to
jump_label_text_reserved() which takes a long time;
up to 400us, iterating over approx 6000 jump tables.
The scope protected by preempt_disable() is larger than necessary.
core_kernel_text() does not need to be protected as it does not interact
with module code at all.
Only the scope from __module_text_address() to try_module_get() needs to
be protected.
By limiting the critical section to __module_text_address() and
try_module_get() the function responsible for the latency spike remains
preemptible.
This works fine even when !CONFIG_MODULES as in that case
try_module_get() will always return true and that block can be optimized
away.
Limit the critical section to __module_text_address() and
try_module_get(). Use guard(preempt)() for easier error handling.
While at it also remove a spurious *probed_mod = NULL in an error
path. On errors the output parameter is never inspected by the caller.
Some error paths were clearing the parameters, some didn't.
Align them for clarity.
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
---
kernel/kprobes.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da59c68df8412c4662d39d3e286d516e8cee9a69..369020170e832a34ea9c05eda6693bded02ec505 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -39,6 +39,7 @@
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
+#include <linux/cleanup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -1570,16 +1571,25 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (ret)
return ret;
jump_label_lock();
- preempt_disable();
/* Ensure the address is in a text area, and find a module if exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
+ guard(preempt)();
*probed_mod = __module_text_address((unsigned long) p->addr);
if (!(*probed_mod)) {
ret = -EINVAL;
goto out;
}
+
+ /*
+ * We must hold a refcount of the probed module while updating
+ * its code to prohibit unexpected unloading.
+ */
+ if (unlikely(!try_module_get(*probed_mod))) {
+ ret = -ENOENT;
+ goto out;
+ }
}
/* Ensure it is not in reserved area. */
if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1588,21 +1598,13 @@ static int check_kprobe_address_safe(struct kprobe *p,
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr) ||
is_cfi_preamble_symbol((unsigned long)p->addr)) {
+ module_put(*probed_mod);
ret = -EINVAL;
goto out;
}
/* Get module refcount and reject __init functions for loaded modules. */
if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
- /*
- * We must hold a refcount of the probed module while updating
- * its code to prohibit unexpected unloading.
- */
- if (unlikely(!try_module_get(*probed_mod))) {
- ret = -ENOENT;
- goto out;
- }
-
/*
* If the module freed '.init.text', we couldn't insert
* kprobes in there.
@@ -1610,13 +1612,11 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (within_module_init((unsigned long)p->addr, *probed_mod) &&
!module_is_coming(*probed_mod)) {
module_put(*probed_mod);
- *probed_mod = NULL;
ret = -ENOENT;
}
}
out:
- preempt_enable();
jump_label_unlock();
return ret;
---
base-commit: adc218676eef25575469234709c2d87185ca223a
change-id: 20241120-kprobes-preempt-09826f5e5d9c
Best regards,
--
Thomas Weißschuh <thomas.weissschuh@linutronix.de>
On Thu, 21 Nov 2024 08:38:08 +0100
Thomas Weißschuh <thomas.weissschuh@linutronix.de> wrote:
> Commit a189d0350f387 ("kprobes: disable preempt for module_text_address() and kernel_text_address()")
> introduced a preempt_disable() region to protect against concurrent
> module unloading. However this region also includes the call to
> jump_label_text_reserved() which takes a long time;
> up to 400us, iterating over approx 6000 jump tables.
Oops, that's too long.
>
> The scope protected by preempt_disable() is largen than necessary.
> core_kernel_text() does not need to be protected as it does not interact
> with module code at all.
> Only the scope from __module_text_address() to try_module_get() needs to
> be protected.
> By limiting the critical section to __module_text_address() and
> try_module_get() the function responsible for the latency spike remains
> preemptible.
Yes, after getting the module refcount, no need to take care of
preemption.
>
> This works fine even when !CONFIG_MODULES as in that case
> try_module_get() will always return true and that block can be optimized
> away.
>
> Limit the critical section to __module_text_address() and
> try_module_get(). Use guard(preempt)() for easier error handling.
>
> While at it also remove a spurious *probed_mod = NULL in an error
> path. On errors the output parameter is never inspected by the caller.
> Some error paths were clearing the parameters, some didn't.
> Align them for clarity.
>
> Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Looks good to me. let me pick it.
Thank you,
> ---
> kernel/kprobes.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index da59c68df8412c4662d39d3e286d516e8cee9a69..369020170e832a34ea9c05eda6693bded02ec505 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -39,6 +39,7 @@
> #include <linux/static_call.h>
> #include <linux/perf_event.h>
> #include <linux/execmem.h>
> +#include <linux/cleanup.h>
>
> #include <asm/sections.h>
> #include <asm/cacheflush.h>
> @@ -1570,16 +1571,25 @@ static int check_kprobe_address_safe(struct kprobe *p,
> if (ret)
> return ret;
> jump_label_lock();
> - preempt_disable();
>
> /* Ensure the address is in a text area, and find a module if exists. */
> *probed_mod = NULL;
> if (!core_kernel_text((unsigned long) p->addr)) {
> + guard(preempt)();
> *probed_mod = __module_text_address((unsigned long) p->addr);
> if (!(*probed_mod)) {
> ret = -EINVAL;
> goto out;
> }
> +
> + /*
> + * We must hold a refcount of the probed module while updating
> + * its code to prohibit unexpected unloading.
> + */
> + if (unlikely(!try_module_get(*probed_mod))) {
> + ret = -ENOENT;
> + goto out;
> + }
> }
> /* Ensure it is not in reserved area. */
> if (in_gate_area_no_mm((unsigned long) p->addr) ||
> @@ -1588,21 +1598,13 @@ static int check_kprobe_address_safe(struct kprobe *p,
> static_call_text_reserved(p->addr, p->addr) ||
> find_bug((unsigned long)p->addr) ||
> is_cfi_preamble_symbol((unsigned long)p->addr)) {
> + module_put(*probed_mod);
> ret = -EINVAL;
> goto out;
> }
>
> /* Get module refcount and reject __init functions for loaded modules. */
> if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
> - /*
> - * We must hold a refcount of the probed module while updating
> - * its code to prohibit unexpected unloading.
> - */
> - if (unlikely(!try_module_get(*probed_mod))) {
> - ret = -ENOENT;
> - goto out;
> - }
> -
> /*
> * If the module freed '.init.text', we couldn't insert
> * kprobes in there.
> @@ -1610,13 +1612,11 @@ static int check_kprobe_address_safe(struct kprobe *p,
> if (within_module_init((unsigned long)p->addr, *probed_mod) &&
> !module_is_coming(*probed_mod)) {
> module_put(*probed_mod);
> - *probed_mod = NULL;
> ret = -ENOENT;
> }
> }
>
> out:
> - preempt_enable();
> jump_label_unlock();
>
> return ret;
>
> ---
> base-commit: adc218676eef25575469234709c2d87185ca223a
> change-id: 20241120-kprobes-preempt-09826f5e5d9c
>
> Best regards,
> --
> Thomas Weißschuh <thomas.weissschuh@linutronix.de>
>
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>
On 2024-11-21 08:38:08 [+0100], Thomas Weißschuh wrote:
…
> Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>

Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index da59c68df8412c4662d39d3e286d516e8cee9a69..369020170e832a34ea9c05eda6693bded02ec505 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -39,6 +39,7 @@
>  #include <linux/static_call.h>
>  #include <linux/perf_event.h>
>  #include <linux/execmem.h>
> +#include <linux/cleanup.h>

This hunk can be dropped. The definition for guard(preempt)() comes from
preempt.h.

>  #include <asm/sections.h>
>  #include <asm/cacheflush.h>

Sebastian
On Thu, 21 Nov 2024 09:04:32 +0100
Sebastian Andrzej Siewior <bigeasy@linutronix.de> wrote:

> On 2024-11-21 08:38:08 [+0100], Thomas Weißschuh wrote:
> …
> > Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
>
> Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Thanks for your review!

> > diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> > index da59c68df8412c4662d39d3e286d516e8cee9a69..369020170e832a34ea9c05eda6693bded02ec505 100644
> > --- a/kernel/kprobes.c
> > +++ b/kernel/kprobes.c
> > @@ -39,6 +39,7 @@
> >  #include <linux/static_call.h>
> >  #include <linux/perf_event.h>
> >  #include <linux/execmem.h>
> > +#include <linux/cleanup.h>
>
> This hunk can be dropped. The definition for guard(preempt)() comes from
> preempt.h.
>
> >  #include <asm/sections.h>
> >  #include <asm/cacheflush.h>
>
> Sebastian

--
Masami Hiramatsu (Google) <mhiramat@kernel.org>
© 2016 - 2026 Red Hat, Inc.