Hi, This series fixes eprobes and cleans up kprobes and the probe events in ftrace to use guard() and scoped_guard() instead of pairs of mutex lock/unlock calls. Some locks still do not use guard(); more work is needed to finish the conversion. Thanks, --- Masami Hiramatsu (Google) (6): tracing/eprobe: Fix to release eprobe when failed to add dyn_event kprobes: Adopt guard() and scoped_guard() tracing/kprobe: Adopt guard() and scoped_guard() tracing/uprobe: Adopt guard() and scoped_guard() tracing/eprobe: Adopt guard() and scoped_guard() tracing/dynevent: Adopt guard() and scoped_guard() kernel/kprobes.c | 205 ++++++++++++++++++----------------- kernel/trace/trace_dynevent.c | 12 +- kernel/trace/trace_eprobe.c | 31 +++--- kernel/trace/trace_kprobe.c | 18 +--- kernel/trace/trace_uprobe.c | 15 +-- 5 files changed, 121 insertions(+), 160 deletions(-) -- Masami Hiramatsu (Google) <mhiramat@kernel.org>
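[Editor's note: for readers unfamiliar with the conversion this series applies, the following is a minimal illustrative sketch, not code from the patches. guard() and scoped_guard() come from <linux/cleanup.h> and release the lock automatically when the guard variable goes out of scope; the example_* names are hypothetical.]

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);
static int example_count;

/* Before: a discrete lock/unlock pair with a goto-based unlock path. */
static int example_inc_old(void)
{
	int ret = 0;

	mutex_lock(&example_mutex);
	if (example_count < 0) {
		ret = -EINVAL;
		goto out;
	}
	example_count++;
out:
	mutex_unlock(&example_mutex);
	return ret;
}

/* After: guard() unlocks automatically on every return path. */
static int example_inc_new(void)
{
	guard(mutex)(&example_mutex);

	if (example_count < 0)
		return -EINVAL;
	example_count++;
	return 0;
}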
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Fix the eprobe event to unregister the event call and release the eprobe when it fails to add the dynamic event. Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events") Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_eprobe.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -XXX,XX +XXX,XX @@ static int __trace_eprobe_create(int argc, const char *argv[]) goto error; } ret = dyn_event_add(&ep->devent, &ep->tp.event->call); + if (ret < 0) { + trace_probe_unregister_event_call(&ep->tp); + mutex_unlock(&event_mutex); + goto error; + } mutex_unlock(&event_mutex); return ret; parse_error:
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/kprobes.c | 205 +++++++++++++++++++++++------------------------- 1 file changed, 89 insertions(+), 116 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -XXX,XX +XXX,XX @@ static int collect_garbage_slots(struct kprobe_insn_cache *c); kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) { struct kprobe_insn_page *kip; - kprobe_opcode_t *slot = NULL; /* Since the slot array is not protected by rcu, we need a mutex */ - mutex_lock(&c->mutex); + guard(mutex)(&c->mutex); retry: rcu_read_lock(); list_for_each_entry_rcu(kip, &c->pages, list) { @@ -XXX,XX +XXX,XX @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) if (kip->slot_used[i] == SLOT_CLEAN) { kip->slot_used[i] = SLOT_USED; kip->nused++; - slot = kip->insns + (i * c->insn_size); rcu_read_unlock(); - goto out; + return kip->insns + (i * c->insn_size); } } /* kip->nused is broken. Fix it. */ @@ -XXX,XX +XXX,XX @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) /* All out of space. Need to allocate a new page. */ kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL); if (!kip) - goto out; + return NULL; kip->insns = c->alloc(); if (!kip->insns) { kfree(kip); - goto out; + return NULL; } INIT_LIST_HEAD(&kip->list); memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); @@ -XXX,XX +XXX,XX @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) kip->ngarbage = 0; kip->cache = c; list_add_rcu(&kip->list, &c->pages); - slot = kip->insns; /* Record the perf ksymbol register event after adding the page */ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, PAGE_SIZE, false, c->sym); -out: - mutex_unlock(&c->mutex); - return slot; + + return kip->insns; } /* Return true if all garbages are collected, otherwise false. */ @@ -XXX,XX +XXX,XX @@ void __free_insn_slot(struct kprobe_insn_cache *c, struct kprobe_insn_page *kip; long idx; - mutex_lock(&c->mutex); + guard(mutex)(&c->mutex); rcu_read_lock(); list_for_each_entry_rcu(kip, &c->pages, list) { idx = ((long)slot - (long)kip->insns) / @@ -XXX,XX +XXX,XX @@ void __free_insn_slot(struct kprobe_insn_cache *c, collect_one_slot(kip, idx); } } - mutex_unlock(&c->mutex); } /* @@ -XXX,XX +XXX,XX @@ static void kprobe_optimizer(struct work_struct *work) mutex_unlock(&kprobe_mutex); } -/* Wait for completing optimization and unoptimization */ -void wait_for_kprobe_optimizer(void) +static void wait_for_kprobe_optimizer_locked(void) { - mutex_lock(&kprobe_mutex); + lockdep_assert_held(&kprobe_mutex); while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { mutex_unlock(&kprobe_mutex); @@ -XXX,XX +XXX,XX @@ void wait_for_kprobe_optimizer(void) mutex_lock(&kprobe_mutex); } +} - mutex_unlock(&kprobe_mutex); +/* Wait for completing optimization and unoptimization */ +void wait_for_kprobe_optimizer(void) +{ + guard(mutex)(&kprobe_mutex); + + wait_for_kprobe_optimizer_locked(); } bool optprobe_queued_unopt(struct optimized_kprobe *op) @@ -XXX,XX +XXX,XX @@ static void optimize_all_kprobes(void) struct kprobe *p; unsigned int i; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If optimization is already allowed, just return. */ if (kprobes_allow_optimization) - goto out; + return; cpus_read_lock(); kprobes_allow_optimization = true; @@ -XXX,XX +XXX,XX @@ static void optimize_all_kprobes(void) } cpus_read_unlock(); pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); -out: - mutex_unlock(&kprobe_mutex); } #ifdef CONFIG_SYSCTL @@ -XXX,XX +XXX,XX @@ static void unoptimize_all_kprobes(void) struct kprobe *p; unsigned int i; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If optimization is already prohibited, just return. */ - if (!kprobes_allow_optimization) { - mutex_unlock(&kprobe_mutex); + if (!kprobes_allow_optimization) return; - } cpus_read_lock(); kprobes_allow_optimization = false; @@ -XXX,XX +XXX,XX @@ static void unoptimize_all_kprobes(void) } } cpus_read_unlock(); - mutex_unlock(&kprobe_mutex); - /* Wait for unoptimizing completion. */ - wait_for_kprobe_optimizer(); + wait_for_kprobe_optimizer_locked(); pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); } @@ -XXX,XX +XXX,XX @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table, { int ret; - mutex_lock(&kprobe_sysctl_mutex); + guard(mutex)(&kprobe_sysctl_mutex); sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); @@ -XXX,XX +XXX,XX @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table, optimize_all_kprobes(); else unoptimize_all_kprobes(); - mutex_unlock(&kprobe_sysctl_mutex); return ret; } @@ -XXX,XX +XXX,XX @@ static void __disarm_kprobe(struct kprobe *p, bool reopt) #define __arm_kprobe(p) arch_arm_kprobe(p) #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) #define kprobe_disarmed(p) kprobe_disabled(p) -#define wait_for_kprobe_optimizer() do {} while (0) +#define wait_for_kprobe_optimizer_locked() \ + lockdep_assert_held(&kprobe_mutex) static int reuse_unused_kprobe(struct kprobe *ap) { @@ -XXX,XX +XXX,XX @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name, static kprobe_opcode_t *kprobe_addr(struct kprobe *p) { bool on_func_entry; + return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); } @@ -XXX,XX +XXX,XX @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p) */ static inline int warn_kprobe_rereg(struct kprobe *p) { - int ret = 0; + guard(mutex)(&kprobe_mutex); - mutex_lock(&kprobe_mutex); if (WARN_ON_ONCE(__get_valid_kprobe(p))) - ret = -EINVAL; - mutex_unlock(&kprobe_mutex); + return -EINVAL; - return ret; + return 0; } static int check_ftrace_location(struct kprobe *p) @@ -XXX,XX +XXX,XX @@ static int check_kprobe_address_safe(struct kprobe *p, return ret; } -int register_kprobe(struct kprobe *p) +static int __register_kprobe(struct kprobe *p) { int ret; struct kprobe *old_p; - struct module *probed_mod; - kprobe_opcode_t *addr; - bool on_func_entry; - - /* Adjust probe address from symbol */ - addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); - if (IS_ERR(addr)) - return PTR_ERR(addr); - p->addr = addr; - - ret = warn_kprobe_rereg(p); - if (ret) - return ret; - /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ - p->flags &= KPROBE_FLAG_DISABLED; - p->nmissed = 0; - INIT_LIST_HEAD(&p->list); - - ret = check_kprobe_address_safe(p, &probed_mod); - if (ret) - return ret; - - mutex_lock(&kprobe_mutex); - - if (on_func_entry) - p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; + guard(mutex)(&kprobe_mutex); old_p = get_kprobe(p->addr); - if (old_p) { + if (old_p) /* Since this may unoptimize 'old_p', locking 'text_mutex'. */ - ret = register_aggr_kprobe(old_p, p); - goto out; - } + return register_aggr_kprobe(old_p, p); cpus_read_lock(); /* Prevent text modification */ @@ -XXX,XX +XXX,XX @@ int register_kprobe(struct kprobe *p) mutex_unlock(&text_mutex); cpus_read_unlock(); if (ret) - goto out; + return ret; INIT_HLIST_NODE(&p->hlist); hlist_add_head_rcu(&p->hlist, @@ -XXX,XX +XXX,XX @@ int register_kprobe(struct kprobe *p) if (ret) { hlist_del_rcu(&p->hlist); synchronize_rcu(); - goto out; } } /* Try to optimize kprobe */ try_to_optimize_kprobe(p); -out: - mutex_unlock(&kprobe_mutex); + return 0; +} + +int register_kprobe(struct kprobe *p) +{ + int ret; + struct module *probed_mod; + kprobe_opcode_t *addr; + bool on_func_entry; + + /* Canonicalize probe address from symbol */ + addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); + if (IS_ERR(addr)) + return PTR_ERR(addr); + p->addr = addr; + + ret = warn_kprobe_rereg(p); + if (ret) + return ret; + + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ + p->flags &= KPROBE_FLAG_DISABLED; + if (on_func_entry) + p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; + p->nmissed = 0; + INIT_LIST_HEAD(&p->list); + + ret = check_kprobe_address_safe(p, &probed_mod); + if (ret) + return ret; + + ret = __register_kprobe(p); if (probed_mod) module_put(probed_mod); @@ -XXX,XX +XXX,XX @@ void unregister_kprobes(struct kprobe **kps, int num) if (num <= 0) return; - mutex_lock(&kprobe_mutex); - for (i = 0; i < num; i++) - if (__unregister_kprobe_top(kps[i]) < 0) - kps[i]->addr = NULL; - mutex_unlock(&kprobe_mutex); - + scoped_guard(mutex, &kprobe_mutex) { + for (i = 0; i < num; i++) + if (__unregister_kprobe_top(kps[i]) < 0) + kps[i]->addr = NULL; + } synchronize_rcu(); for (i = 0; i < num; i++) if (kps[i]->addr) @@ -XXX,XX +XXX,XX @@ void unregister_kretprobes(struct kretprobe **rps, int num) if (num <= 0) return; - mutex_lock(&kprobe_mutex); for (i = 0; i < num; i++) { + guard(mutex)(&kprobe_mutex); + if (__unregister_kprobe_top(&rps[i]->kp) < 0) rps[i]->kp.addr = NULL; #ifdef CONFIG_KRETPROBE_ON_RETHOOK @@ -XXX,XX +XXX,XX @@ void unregister_kretprobes(struct kretprobe **rps, int num) rcu_assign_pointer(rps[i]->rph->rp, NULL); #endif } - mutex_unlock(&kprobe_mutex); synchronize_rcu(); for (i = 0; i < num; i++) { @@ -XXX,XX +XXX,XX @@ static void kill_kprobe(struct kprobe *p) /* Disable one kprobe */ int disable_kprobe(struct kprobe *kp) { - int ret = 0; struct kprobe *p; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* Disable this kprobe */ p = __disable_kprobe(kp); - if (IS_ERR(p)) - ret = PTR_ERR(p); - mutex_unlock(&kprobe_mutex); - return ret; + return IS_ERR(p) ? PTR_ERR(p) : 0; } EXPORT_SYMBOL_GPL(disable_kprobe); @@ -XXX,XX +XXX,XX @@ int enable_kprobe(struct kprobe *kp) int ret = 0; struct kprobe *p; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* Check whether specified probe is valid. */ p = __get_valid_kprobe(kp); - if (unlikely(p == NULL)) { - ret = -EINVAL; - goto out; - } + if (unlikely(p == NULL)) + return -EINVAL; - if (kprobe_gone(kp)) { + if (kprobe_gone(kp)) /* This kprobe has gone, we couldn't enable it. */ - ret = -EINVAL; - goto out; - } + return -EINVAL; if (p != kp) kp->flags &= ~KPROBE_FLAG_DISABLED; @@ -XXX,XX +XXX,XX @@ int enable_kprobe(struct kprobe *kp) kp->flags |= KPROBE_FLAG_DISABLED; } } -out: - mutex_unlock(&kprobe_mutex); return ret; } EXPORT_SYMBOL_GPL(enable_kprobe); @@ -XXX,XX +XXX,XX @@ static int kprobes_module_callback(struct notifier_block *nb, unsigned int i; int checkcore = (val == MODULE_STATE_GOING); - if (val == MODULE_STATE_COMING) { - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); + + if (val == MODULE_STATE_COMING) add_module_kprobe_blacklist(mod); - mutex_unlock(&kprobe_mutex); - } + if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) return NOTIFY_DONE; @@ -XXX,XX +XXX,XX @@ static int kprobes_module_callback(struct notifier_block *nb, * notified, only '.init.text' section would be freed. We need to * disable kprobes which have been inserted in the sections. */ - mutex_lock(&kprobe_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry(p, head, hlist) @@ -XXX,XX +XXX,XX @@ static int kprobes_module_callback(struct notifier_block *nb, } if (val == MODULE_STATE_GOING) remove_module_kprobe_blacklist(mod); - mutex_unlock(&kprobe_mutex); return NOTIFY_DONE; } @@ -XXX,XX +XXX,XX @@ void kprobe_free_init_mem(void) struct kprobe *p; int i; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* Kill all kprobes on initmem because the target code has been freed. */ for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -XXX,XX +XXX,XX @@ void kprobe_free_init_mem(void) kill_kprobe(p); } } - - mutex_unlock(&kprobe_mutex); } static int __init init_kprobes(void) @@ -XXX,XX +XXX,XX @@ static int arm_all_kprobes(void) unsigned int i, total = 0, errors = 0; int err, ret = 0; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If kprobes are armed, just return */ if (!kprobes_all_disarmed) - goto already_enabled; + return 0; /* * optimize_kprobe() called by arm_kprobe() checks @@ -XXX,XX +XXX,XX @@ static int arm_all_kprobes(void) else pr_info("Kprobes globally enabled\n"); -already_enabled: - mutex_unlock(&kprobe_mutex); return ret; } @@ -XXX,XX +XXX,XX @@ static int disarm_all_kprobes(void) unsigned int i, total = 0, errors = 0; int err, ret = 0; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If kprobes are already disarmed, just return */ - if (kprobes_all_disarmed) { - mutex_unlock(&kprobe_mutex); + if (kprobes_all_disarmed) return 0; - } kprobes_all_disarmed = true; @@ -XXX,XX +XXX,XX @@ static int disarm_all_kprobes(void) else pr_info("Kprobes globally disabled\n"); - mutex_unlock(&kprobe_mutex); - /* Wait for disarming all kprobes by optimizer */ - wait_for_kprobe_optimizer(); - + wait_for_kprobe_optimizer_locked(); return ret; }
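[Editor's note: one detail of the kprobes patch above deserves a remark. wait_for_kprobe_optimizer() is split into a *_locked helper that documents its locking contract with lockdep_assert_held(), plus a thin guard() wrapper for external callers. This lets functions that already hold kprobe_mutex, such as unoptimize_all_kprobes() and disarm_all_kprobes(), call the helper directly instead of dropping and re-taking the lock. A minimal sketch of that shape, under hypothetical demo_* names (not from the patch):]

#include <linux/cleanup.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

/* Callers must already hold demo_mutex; lockdep checks the contract. */
static void demo_flush_locked(void)
{
	lockdep_assert_held(&demo_mutex);
	/* ... work that may temporarily drop and re-take demo_mutex ... */
}

/* Public entry point: guard() scopes the mutex to this function. */
void demo_flush(void)
{
	guard(mutex)(&demo_mutex);
	demo_flush_locked();
}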
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in kprobe events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_kprobe.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) struct trace_kprobe *old_tk; int ret; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); old_tk = find_trace_kprobe(trace_probe_name(&tk->tp), trace_probe_group_name(&tk->tp)); @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) { trace_probe_log_set_index(0); trace_probe_log_err(0, DIFF_PROBE_TYPE); - ret = -EEXIST; - } else { - ret = append_trace_kprobe(tk, old_tk); + return -EEXIST; } - goto end; + return append_trace_kprobe(tk, old_tk); } /* Register new event */ @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) trace_probe_log_err(0, EVENT_EXIST); } else pr_warn("Failed to register probe event(%d)\n", ret); - goto end; + return ret; } /* Register k*probe */ @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) else dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp)); -end: - mutex_unlock(&event_mutex); return ret; } @@ -XXX,XX +XXX,XX @@ static int trace_kprobe_module_callback(struct notifier_block *nb, return NOTIFY_DONE; /* Update probes on coming module */ - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_trace_kprobe(tk, pos) { if (trace_kprobe_within_module(tk, mod)) { /* Don't need to check busy - this should have gone. */ @@ -XXX,XX +XXX,XX @@ static int trace_kprobe_module_callback(struct notifier_block *nb, module_name(mod), ret); } } - mutex_unlock(&event_mutex); return NOTIFY_DONE; } @@ -XXX,XX +XXX,XX @@ static __init void enable_boot_kprobe_events(void) struct trace_kprobe *tk; struct dyn_event *pos; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_trace_kprobe(tk, pos) { list_for_each_entry(file, &tr->events, list) if (file->event_call == trace_probe_event_call(&tk->tp)) trace_event_enable_disable(file, 1, 0); } - mutex_unlock(&event_mutex); } static __init void setup_boot_kprobe_events(void)
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in uprobe events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_uprobe.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -XXX,XX +XXX,XX @@ static int register_trace_uprobe(struct trace_uprobe *tu) struct trace_uprobe *old_tu; int ret; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); ret = validate_ref_ctr_offset(tu); if (ret) - goto end; + return ret; /* register as an event */ old_tu = find_probe_event(trace_probe_name(&tu->tp), @@ -XXX,XX +XXX,XX @@ static int register_trace_uprobe(struct trace_uprobe *tu) if (is_ret_probe(tu) != is_ret_probe(old_tu)) { trace_probe_log_set_index(0); trace_probe_log_err(0, DIFF_PROBE_TYPE); - ret = -EEXIST; - } else { - ret = append_trace_uprobe(tu, old_tu); + return -EEXIST; } - goto end; + return append_trace_uprobe(tu, old_tu); } ret = register_uprobe_event(tu); @@ -XXX,XX +XXX,XX @@ static int register_trace_uprobe(struct trace_uprobe *tu) trace_probe_log_err(0, EVENT_EXIST); } else pr_warn("Failed to register probe event(%d)\n", ret); - goto end; + return ret; } dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); -end: - mutex_unlock(&event_mutex); - return ret; }
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in eprobe events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_eprobe.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -XXX,XX +XXX,XX @@ static int __trace_eprobe_create(int argc, const char *argv[]) goto error; } - mutex_lock(&event_mutex); - event_call = find_and_get_event(sys_name, sys_event); - ep = alloc_event_probe(group, event, event_call, argc - 2); - mutex_unlock(&event_mutex); + scoped_guard(mutex, &event_mutex) { + event_call = find_and_get_event(sys_name, sys_event); + ep = alloc_event_probe(group, event, event_call, argc - 2); + } if (IS_ERR(ep)) { ret = PTR_ERR(ep); @@ -XXX,XX +XXX,XX @@ static int __trace_eprobe_create(int argc, const char *argv[]) if (ret < 0) goto error; init_trace_eprobe_call(ep); - mutex_lock(&event_mutex); - ret = trace_probe_register_event_call(&ep->tp); - if (ret) { - if (ret == -EEXIST) { - trace_probe_log_set_index(0); - trace_probe_log_err(0, EVENT_EXIST); + scoped_guard(mutex, &event_mutex) { + ret = trace_probe_register_event_call(&ep->tp); + if (ret) { + if (ret == -EEXIST) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, EVENT_EXIST); + } + goto error; + } + ret = dyn_event_add(&ep->devent, &ep->tp.event->call); + if (ret < 0) { + trace_probe_unregister_event_call(&ep->tp); + goto error; } - mutex_unlock(&event_mutex); - goto error; - } - ret = dyn_event_add(&ep->devent, &ep->tp.event->call); - if (ret < 0) { - trace_probe_unregister_event_call(&ep->tp); - mutex_unlock(&event_mutex); - goto error; } - mutex_unlock(&event_mutex); return ret; parse_error: ret = -EINVAL;
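[Editor's note: the eprobe conversion above jumps out of a scoped_guard() block with "goto error;". That is safe: the guard variable carries a cleanup attribute, and the compiler runs the cleanup (unlocking the mutex) whenever the variable goes out of scope, including an exit via goto. A minimal sketch of the idea, under hypothetical demo_* names:]

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);
static int demo_register(void) { return -EEXIST; }

int demo_setup(void)
{
	int ret;

	scoped_guard(mutex, &demo_mutex) {
		ret = demo_register();
		if (ret)
			goto error;	/* demo_mutex is released on the way out */
	}
	return 0;
error:
	/* rollback here runs without demo_mutex held */
	return ret;
}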
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in dynamic events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_dynevent.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -XXX,XX +XXX,XX @@ int dyn_event_register(struct dyn_event_operations *ops) return -EINVAL; INIT_LIST_HEAD(&ops->list); - mutex_lock(&dyn_event_ops_mutex); + guard(mutex)(&dyn_event_ops_mutex); list_add_tail(&ops->list, &dyn_event_ops_list); - mutex_unlock(&dyn_event_ops_mutex); return 0; } @@ -XXX,XX +XXX,XX @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type goto out; } - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_dyn_event_safe(pos, n) { if (type && type != pos->ops) continue; @@ -XXX,XX +XXX,XX @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type break; } tracing_reset_all_online_cpus(); - mutex_unlock(&event_mutex); out: argv_free(argv); return ret; @@ -XXX,XX +XXX,XX @@ static int create_dyn_event(const char *raw_command) if (raw_command[0] == '-' || raw_command[0] == '!') return dyn_event_release(raw_command, NULL); - mutex_lock(&dyn_event_ops_mutex); + guard(mutex)(&dyn_event_ops_mutex); list_for_each_entry(ops, &dyn_event_ops_list, list) { ret = ops->create(raw_command); if (!ret || ret != -ECANCELED) break; } - mutex_unlock(&dyn_event_ops_mutex); if (ret == -ECANCELED) ret = -EINVAL; @@ -XXX,XX +XXX,XX @@ int dyn_events_release_all(struct dyn_event_operations *type) struct dyn_event *ev, *tmp; int ret = 0; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_dyn_event(ev) { if (type && ev->ops != type) continue; @@ -XXX,XX +XXX,XX @@ int dyn_events_release_all(struct dyn_event_operations *type) } out: tracing_reset_all_online_cpus(); - mutex_unlock(&event_mutex); return ret; }
Hi, This is v2 of the series to fix eprobes and clean up kprobes and the probe events in ftrace to use guard() and scoped_guard() instead of pairs of mutex lock/unlock calls. In this version I fixed the mixing of guard() and goto in the same function by using scoped_guard(). Some locks still do not use guard(); more work is needed to finish the conversion. Thanks, --- Masami Hiramatsu (Google) (6): tracing/eprobe: Fix to release eprobe when failed to add dyn_event kprobes: Adopt guard() and scoped_guard() tracing/kprobe: Adopt guard() and scoped_guard() tracing/uprobe: Adopt guard() and scoped_guard() tracing/eprobe: Adopt guard() and scoped_guard() tracing/dynevent: Adopt guard() and scoped_guard() kernel/kprobes.c | 205 ++++++++++++++++++----------------- kernel/trace/trace_dynevent.c | 35 +++---- kernel/trace/trace_eprobe.c | 31 +++--- kernel/trace/trace_kprobe.c | 18 +--- kernel/trace/trace_uprobe.c | 15 +-- 5 files changed, 133 insertions(+), 171 deletions(-) -- Masami Hiramatsu (Google) <mhiramat@kernel.org>
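[Editor's note: the guard()/goto mixing fixed in this version comes from guard() declaring a local variable with an automatic cleanup attribute. A goto that jumps over that declaration to a label later in the function, the shape the v1 dyn_event_release() had, draws compiler diagnostics because it skips the variable's initialization. scoped_guard() avoids this by confining the lock and its cleanup variable to an explicit block. A minimal sketch, under hypothetical demo_* names:]

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static int demo_parse(const char *cmd)  { return cmd[0] ? 0 : -EINVAL; }
static int demo_update(const char *cmd) { return cmd[0] == '!' ? -EINVAL : 0; }

int demo_create(const char *cmd)
{
	int ret = demo_parse(cmd);

	if (ret)
		goto out;	/* must not jump over a function-scope guard() */

	/*
	 * The guard's cleanup variable lives only inside this block,
	 * so the goto above never crosses its initialization.
	 */
	scoped_guard(mutex, &demo_mutex)
		ret = demo_update(cmd);
out:
	return ret;
}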
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Fix the eprobe event to unregister the event call and release the eprobe when it fails to add the dynamic event. Fixes: 7491e2c44278 ("tracing: Add a probe that attaches to trace events") Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_eprobe.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -XXX,XX +XXX,XX @@ static int __trace_eprobe_create(int argc, const char *argv[]) goto error; } ret = dyn_event_add(&ep->devent, &ep->tp.event->call); + if (ret < 0) { + trace_probe_unregister_event_call(&ep->tp); + mutex_unlock(&event_mutex); + goto error; + } mutex_unlock(&event_mutex); return ret; parse_error:
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/kprobes.c | 205 +++++++++++++++++++++++------------------------- 1 file changed, 89 insertions(+), 116 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -XXX,XX +XXX,XX @@ static int collect_garbage_slots(struct kprobe_insn_cache *c); kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) { struct kprobe_insn_page *kip; - kprobe_opcode_t *slot = NULL; /* Since the slot array is not protected by rcu, we need a mutex */ - mutex_lock(&c->mutex); + guard(mutex)(&c->mutex); retry: rcu_read_lock(); list_for_each_entry_rcu(kip, &c->pages, list) { @@ -XXX,XX +XXX,XX @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) if (kip->slot_used[i] == SLOT_CLEAN) { kip->slot_used[i] = SLOT_USED; kip->nused++; - slot = kip->insns + (i * c->insn_size); rcu_read_unlock(); - goto out; + return kip->insns + (i * c->insn_size); } } /* kip->nused is broken. Fix it. */ @@ -XXX,XX +XXX,XX @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) /* All out of space. Need to allocate a new page. */ kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL); if (!kip) - goto out; + return NULL; kip->insns = c->alloc(); if (!kip->insns) { kfree(kip); - goto out; + return NULL; } INIT_LIST_HEAD(&kip->list); memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); @@ -XXX,XX +XXX,XX @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) kip->ngarbage = 0; kip->cache = c; list_add_rcu(&kip->list, &c->pages); - slot = kip->insns; /* Record the perf ksymbol register event after adding the page */ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, PAGE_SIZE, false, c->sym); -out: - mutex_unlock(&c->mutex); - return slot; + + return kip->insns; } /* Return true if all garbages are collected, otherwise false. */ @@ -XXX,XX +XXX,XX @@ void __free_insn_slot(struct kprobe_insn_cache *c, struct kprobe_insn_page *kip; long idx; - mutex_lock(&c->mutex); + guard(mutex)(&c->mutex); rcu_read_lock(); list_for_each_entry_rcu(kip, &c->pages, list) { idx = ((long)slot - (long)kip->insns) / @@ -XXX,XX +XXX,XX @@ void __free_insn_slot(struct kprobe_insn_cache *c, collect_one_slot(kip, idx); } } - mutex_unlock(&c->mutex); } /* @@ -XXX,XX +XXX,XX @@ static void kprobe_optimizer(struct work_struct *work) mutex_unlock(&kprobe_mutex); } -/* Wait for completing optimization and unoptimization */ -void wait_for_kprobe_optimizer(void) +static void wait_for_kprobe_optimizer_locked(void) { - mutex_lock(&kprobe_mutex); + lockdep_assert_held(&kprobe_mutex); while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { mutex_unlock(&kprobe_mutex); @@ -XXX,XX +XXX,XX @@ void wait_for_kprobe_optimizer(void) mutex_lock(&kprobe_mutex); } +} - mutex_unlock(&kprobe_mutex); +/* Wait for completing optimization and unoptimization */ +void wait_for_kprobe_optimizer(void) +{ + guard(mutex)(&kprobe_mutex); + + wait_for_kprobe_optimizer_locked(); } bool optprobe_queued_unopt(struct optimized_kprobe *op) @@ -XXX,XX +XXX,XX @@ static void optimize_all_kprobes(void) struct kprobe *p; unsigned int i; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If optimization is already allowed, just return. */ if (kprobes_allow_optimization) - goto out; + return; cpus_read_lock(); kprobes_allow_optimization = true; @@ -XXX,XX +XXX,XX @@ static void optimize_all_kprobes(void) } cpus_read_unlock(); pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); -out: - mutex_unlock(&kprobe_mutex); } #ifdef CONFIG_SYSCTL @@ -XXX,XX +XXX,XX @@ static void unoptimize_all_kprobes(void) struct kprobe *p; unsigned int i; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If optimization is already prohibited, just return. */ - if (!kprobes_allow_optimization) { - mutex_unlock(&kprobe_mutex); + if (!kprobes_allow_optimization) return; - } cpus_read_lock(); kprobes_allow_optimization = false; @@ -XXX,XX +XXX,XX @@ static void unoptimize_all_kprobes(void) } } cpus_read_unlock(); - mutex_unlock(&kprobe_mutex); - /* Wait for unoptimizing completion. */ - wait_for_kprobe_optimizer(); + wait_for_kprobe_optimizer_locked(); pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); } @@ -XXX,XX +XXX,XX @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table, { int ret; - mutex_lock(&kprobe_sysctl_mutex); + guard(mutex)(&kprobe_sysctl_mutex); sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); @@ -XXX,XX +XXX,XX @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table, optimize_all_kprobes(); else unoptimize_all_kprobes(); - mutex_unlock(&kprobe_sysctl_mutex); return ret; } @@ -XXX,XX +XXX,XX @@ static void __disarm_kprobe(struct kprobe *p, bool reopt) #define __arm_kprobe(p) arch_arm_kprobe(p) #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) #define kprobe_disarmed(p) kprobe_disabled(p) -#define wait_for_kprobe_optimizer() do {} while (0) +#define wait_for_kprobe_optimizer_locked() \ + lockdep_assert_held(&kprobe_mutex) static int reuse_unused_kprobe(struct kprobe *ap) { @@ -XXX,XX +XXX,XX @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name, static kprobe_opcode_t *kprobe_addr(struct kprobe *p) { bool on_func_entry; + return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); } @@ -XXX,XX +XXX,XX @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p) */ static inline int warn_kprobe_rereg(struct kprobe *p) { - int ret = 0; + guard(mutex)(&kprobe_mutex); - mutex_lock(&kprobe_mutex); if (WARN_ON_ONCE(__get_valid_kprobe(p))) - ret = -EINVAL; - mutex_unlock(&kprobe_mutex); + return -EINVAL; - return ret; + return 0; } static int check_ftrace_location(struct kprobe *p) @@ -XXX,XX +XXX,XX @@ static int check_kprobe_address_safe(struct kprobe *p, return ret; } -int register_kprobe(struct kprobe *p) +static int __register_kprobe(struct kprobe *p) { int ret; struct kprobe *old_p; - struct module *probed_mod; - kprobe_opcode_t *addr; - bool on_func_entry; - - /* Adjust probe address from symbol */ - addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); - if (IS_ERR(addr)) - return PTR_ERR(addr); - p->addr = addr; - - ret = warn_kprobe_rereg(p); - if (ret) - return ret; - /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ - p->flags &= KPROBE_FLAG_DISABLED; - p->nmissed = 0; - INIT_LIST_HEAD(&p->list); - - ret = check_kprobe_address_safe(p, &probed_mod); - if (ret) - return ret; - - mutex_lock(&kprobe_mutex); - - if (on_func_entry) - p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; + guard(mutex)(&kprobe_mutex); old_p = get_kprobe(p->addr); - if (old_p) { + if (old_p) /* Since this may unoptimize 'old_p', locking 'text_mutex'. */ - ret = register_aggr_kprobe(old_p, p); - goto out; - } + return register_aggr_kprobe(old_p, p); cpus_read_lock(); /* Prevent text modification */ @@ -XXX,XX +XXX,XX @@ int register_kprobe(struct kprobe *p) mutex_unlock(&text_mutex); cpus_read_unlock(); if (ret) - goto out; + return ret; INIT_HLIST_NODE(&p->hlist); hlist_add_head_rcu(&p->hlist, @@ -XXX,XX +XXX,XX @@ int register_kprobe(struct kprobe *p) if (ret) { hlist_del_rcu(&p->hlist); synchronize_rcu(); - goto out; } } /* Try to optimize kprobe */ try_to_optimize_kprobe(p); -out: - mutex_unlock(&kprobe_mutex); + return 0; +} + +int register_kprobe(struct kprobe *p) +{ + int ret; + struct module *probed_mod; + kprobe_opcode_t *addr; + bool on_func_entry; + + /* Canonicalize probe address from symbol */ + addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); + if (IS_ERR(addr)) + return PTR_ERR(addr); + p->addr = addr; + + ret = warn_kprobe_rereg(p); + if (ret) + return ret; + + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ + p->flags &= KPROBE_FLAG_DISABLED; + if (on_func_entry) + p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; + p->nmissed = 0; + INIT_LIST_HEAD(&p->list); + + ret = check_kprobe_address_safe(p, &probed_mod); + if (ret) + return ret; + + ret = __register_kprobe(p); if (probed_mod) module_put(probed_mod); @@ -XXX,XX +XXX,XX @@ void unregister_kprobes(struct kprobe **kps, int num) if (num <= 0) return; - mutex_lock(&kprobe_mutex); - for (i = 0; i < num; i++) - if (__unregister_kprobe_top(kps[i]) < 0) - kps[i]->addr = NULL; - mutex_unlock(&kprobe_mutex); - + scoped_guard(mutex, &kprobe_mutex) { + for (i = 0; i < num; i++) + if (__unregister_kprobe_top(kps[i]) < 0) + kps[i]->addr = NULL; + } synchronize_rcu(); for (i = 0; i < num; i++) if (kps[i]->addr) @@ -XXX,XX +XXX,XX @@ void unregister_kretprobes(struct kretprobe **rps, int num) if (num <= 0) return; - mutex_lock(&kprobe_mutex); for (i = 0; i < num; i++) { + guard(mutex)(&kprobe_mutex); + if (__unregister_kprobe_top(&rps[i]->kp) < 0) rps[i]->kp.addr = NULL; #ifdef CONFIG_KRETPROBE_ON_RETHOOK @@ -XXX,XX +XXX,XX @@ void unregister_kretprobes(struct kretprobe **rps, int num) rcu_assign_pointer(rps[i]->rph->rp, NULL); #endif } - mutex_unlock(&kprobe_mutex); synchronize_rcu(); for (i = 0; i < num; i++) { @@ -XXX,XX +XXX,XX @@ static void kill_kprobe(struct kprobe *p) /* Disable one kprobe */ int disable_kprobe(struct kprobe *kp) { - int ret = 0; struct kprobe *p; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* Disable this kprobe */ p = __disable_kprobe(kp); - if (IS_ERR(p)) - ret = PTR_ERR(p); - mutex_unlock(&kprobe_mutex); - return ret; + return IS_ERR(p) ? PTR_ERR(p) : 0; } EXPORT_SYMBOL_GPL(disable_kprobe); @@ -XXX,XX +XXX,XX @@ int enable_kprobe(struct kprobe *kp) int ret = 0; struct kprobe *p; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* Check whether specified probe is valid. */ p = __get_valid_kprobe(kp); - if (unlikely(p == NULL)) { - ret = -EINVAL; - goto out; - } + if (unlikely(p == NULL)) + return -EINVAL; - if (kprobe_gone(kp)) { + if (kprobe_gone(kp)) /* This kprobe has gone, we couldn't enable it. */ - ret = -EINVAL; - goto out; - } + return -EINVAL; if (p != kp) kp->flags &= ~KPROBE_FLAG_DISABLED; @@ -XXX,XX +XXX,XX @@ int enable_kprobe(struct kprobe *kp) kp->flags |= KPROBE_FLAG_DISABLED; } } -out: - mutex_unlock(&kprobe_mutex); return ret; } EXPORT_SYMBOL_GPL(enable_kprobe); @@ -XXX,XX +XXX,XX @@ static int kprobes_module_callback(struct notifier_block *nb, unsigned int i; int checkcore = (val == MODULE_STATE_GOING); - if (val == MODULE_STATE_COMING) { - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); + + if (val == MODULE_STATE_COMING) add_module_kprobe_blacklist(mod); - mutex_unlock(&kprobe_mutex); - } + if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) return NOTIFY_DONE; @@ -XXX,XX +XXX,XX @@ static int kprobes_module_callback(struct notifier_block *nb, * notified, only '.init.text' section would be freed. We need to * disable kprobes which have been inserted in the sections. */ - mutex_lock(&kprobe_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry(p, head, hlist) @@ -XXX,XX +XXX,XX @@ static int kprobes_module_callback(struct notifier_block *nb, } if (val == MODULE_STATE_GOING) remove_module_kprobe_blacklist(mod); - mutex_unlock(&kprobe_mutex); return NOTIFY_DONE; } @@ -XXX,XX +XXX,XX @@ void kprobe_free_init_mem(void) struct kprobe *p; int i; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* Kill all kprobes on initmem because the target code has been freed. */ for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -XXX,XX +XXX,XX @@ void kprobe_free_init_mem(void) kill_kprobe(p); } } - - mutex_unlock(&kprobe_mutex); } static int __init init_kprobes(void) @@ -XXX,XX +XXX,XX @@ static int arm_all_kprobes(void) unsigned int i, total = 0, errors = 0; int err, ret = 0; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If kprobes are armed, just return */ if (!kprobes_all_disarmed) - goto already_enabled; + return 0; /* * optimize_kprobe() called by arm_kprobe() checks @@ -XXX,XX +XXX,XX @@ static int arm_all_kprobes(void) else pr_info("Kprobes globally enabled\n"); -already_enabled: - mutex_unlock(&kprobe_mutex); return ret; } @@ -XXX,XX +XXX,XX @@ static int disarm_all_kprobes(void) unsigned int i, total = 0, errors = 0; int err, ret = 0; - mutex_lock(&kprobe_mutex); + guard(mutex)(&kprobe_mutex); /* If kprobes are already disarmed, just return */ - if (kprobes_all_disarmed) { - mutex_unlock(&kprobe_mutex); + if (kprobes_all_disarmed) return 0; - } kprobes_all_disarmed = true; @@ -XXX,XX +XXX,XX @@ static int disarm_all_kprobes(void) else pr_info("Kprobes globally disabled\n"); - mutex_unlock(&kprobe_mutex); - /* Wait for disarming all kprobes by optimizer */ - wait_for_kprobe_optimizer(); - + wait_for_kprobe_optimizer_locked(); return ret; }
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in kprobe events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_kprobe.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) struct trace_kprobe *old_tk; int ret; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); old_tk = find_trace_kprobe(trace_probe_name(&tk->tp), trace_probe_group_name(&tk->tp)); @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) { trace_probe_log_set_index(0); trace_probe_log_err(0, DIFF_PROBE_TYPE); - ret = -EEXIST; - } else { - ret = append_trace_kprobe(tk, old_tk); + return -EEXIST; } - goto end; + return append_trace_kprobe(tk, old_tk); } /* Register new event */ @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) trace_probe_log_err(0, EVENT_EXIST); } else pr_warn("Failed to register probe event(%d)\n", ret); - goto end; + return ret; } /* Register k*probe */ @@ -XXX,XX +XXX,XX @@ static int register_trace_kprobe(struct trace_kprobe *tk) else dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp)); -end: - mutex_unlock(&event_mutex); return ret; } @@ -XXX,XX +XXX,XX @@ static int trace_kprobe_module_callback(struct notifier_block *nb, return NOTIFY_DONE; /* Update probes on coming module */ - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_trace_kprobe(tk, pos) { if (trace_kprobe_within_module(tk, mod)) { /* Don't need to check busy - this should have gone. */ @@ -XXX,XX +XXX,XX @@ static int trace_kprobe_module_callback(struct notifier_block *nb, module_name(mod), ret); } } - mutex_unlock(&event_mutex); return NOTIFY_DONE; } @@ -XXX,XX +XXX,XX @@ static __init void enable_boot_kprobe_events(void) struct trace_kprobe *tk; struct dyn_event *pos; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_trace_kprobe(tk, pos) { list_for_each_entry(file, &tr->events, list) if (file->event_call == trace_probe_event_call(&tk->tp)) trace_event_enable_disable(file, 1, 0); } - mutex_unlock(&event_mutex); } static __init void setup_boot_kprobe_events(void)
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in uprobe events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_uprobe.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -XXX,XX +XXX,XX @@ static int register_trace_uprobe(struct trace_uprobe *tu) struct trace_uprobe *old_tu; int ret; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); ret = validate_ref_ctr_offset(tu); if (ret) - goto end; + return ret; /* register as an event */ old_tu = find_probe_event(trace_probe_name(&tu->tp), @@ -XXX,XX +XXX,XX @@ static int register_trace_uprobe(struct trace_uprobe *tu) if (is_ret_probe(tu) != is_ret_probe(old_tu)) { trace_probe_log_set_index(0); trace_probe_log_err(0, DIFF_PROBE_TYPE); - ret = -EEXIST; - } else { - ret = append_trace_uprobe(tu, old_tu); + return -EEXIST; } - goto end; + return append_trace_uprobe(tu, old_tu); } ret = register_uprobe_event(tu); @@ -XXX,XX +XXX,XX @@ static int register_trace_uprobe(struct trace_uprobe *tu) trace_probe_log_err(0, EVENT_EXIST); } else pr_warn("Failed to register probe event(%d)\n", ret); - goto end; + return ret; } dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); -end: - mutex_unlock(&event_mutex); - return ret; }
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in eprobe events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- kernel/trace/trace_eprobe.c | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -XXX,XX +XXX,XX @@ static int __trace_eprobe_create(int argc, const char *argv[]) goto error; } - mutex_lock(&event_mutex); - event_call = find_and_get_event(sys_name, sys_event); - ep = alloc_event_probe(group, event, event_call, argc - 2); - mutex_unlock(&event_mutex); + scoped_guard(mutex, &event_mutex) { + event_call = find_and_get_event(sys_name, sys_event); + ep = alloc_event_probe(group, event, event_call, argc - 2); + } if (IS_ERR(ep)) { ret = PTR_ERR(ep); @@ -XXX,XX +XXX,XX @@ static int __trace_eprobe_create(int argc, const char *argv[]) if (ret < 0) goto error; init_trace_eprobe_call(ep); - mutex_lock(&event_mutex); - ret = trace_probe_register_event_call(&ep->tp); - if (ret) { - if (ret == -EEXIST) { - trace_probe_log_set_index(0); - trace_probe_log_err(0, EVENT_EXIST); + scoped_guard(mutex, &event_mutex) { + ret = trace_probe_register_event_call(&ep->tp); + if (ret) { + if (ret == -EEXIST) { + trace_probe_log_set_index(0); + trace_probe_log_err(0, EVENT_EXIST); + } + goto error; + } + ret = dyn_event_add(&ep->devent, &ep->tp.event->call); + if (ret < 0) { + trace_probe_unregister_event_call(&ep->tp); + goto error; } - mutex_unlock(&event_mutex); - goto error; - } - ret = dyn_event_add(&ep->devent, &ep->tp.event->call); - if (ret < 0) { - trace_probe_unregister_event_call(&ep->tp); - mutex_unlock(&event_mutex); - goto error; } - mutex_unlock(&event_mutex); return ret; parse_error: ret = -EINVAL;
From: Masami Hiramatsu (Google) <mhiramat@kernel.org> Use guard() or scoped_guard() in dynamic events for critical sections rather than discrete lock/unlock pairs. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> --- Changes in v2: - Use scoped_guard() instead of guard() to avoid goto warnings. --- kernel/trace/trace_dynevent.c | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index XXXXXXX..XXXXXXX 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -XXX,XX +XXX,XX @@ int dyn_event_register(struct dyn_event_operations *ops) return -EINVAL; INIT_LIST_HEAD(&ops->list); - mutex_lock(&dyn_event_ops_mutex); + guard(mutex)(&dyn_event_ops_mutex); list_add_tail(&ops->list, &dyn_event_ops_list); - mutex_unlock(&dyn_event_ops_mutex); return 0; } @@ -XXX,XX +XXX,XX @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type goto out; } - mutex_lock(&event_mutex); - for_each_dyn_event_safe(pos, n) { - if (type && type != pos->ops) - continue; - if (!pos->ops->match(system, event, - argc - 1, (const char **)argv + 1, pos)) - continue; - - ret = pos->ops->free(pos); - if (ret) - break; + scoped_guard(mutex, &event_mutex) { + for_each_dyn_event_safe(pos, n) { + if (type && type != pos->ops) + continue; + if (!pos->ops->match(system, event, + argc - 1, (const char **)argv + 1, pos)) + continue; + + ret = pos->ops->free(pos); + if (ret) + break; + } + tracing_reset_all_online_cpus(); } - tracing_reset_all_online_cpus(); - mutex_unlock(&event_mutex); out: argv_free(argv); return ret; @@ -XXX,XX +XXX,XX @@ static int create_dyn_event(const char *raw_command) if (raw_command[0] == '-' || raw_command[0] == '!') return dyn_event_release(raw_command, NULL); - mutex_lock(&dyn_event_ops_mutex); + guard(mutex)(&dyn_event_ops_mutex); list_for_each_entry(ops, &dyn_event_ops_list, list) { ret = ops->create(raw_command); if (!ret || ret != -ECANCELED) break; } - mutex_unlock(&dyn_event_ops_mutex); if (ret == -ECANCELED) ret = -EINVAL; @@ -XXX,XX +XXX,XX @@ int dyn_events_release_all(struct dyn_event_operations *type) struct dyn_event *ev, *tmp; int ret = 0; - mutex_lock(&event_mutex); + guard(mutex)(&event_mutex); for_each_dyn_event(ev) { if (type && ev->ops != type) continue; @@ -XXX,XX +XXX,XX @@ int dyn_events_release_all(struct dyn_event_operations *type) } out: tracing_reset_all_online_cpus(); - mutex_unlock(&event_mutex); return ret; }