Hi all,
Today's linux-next merge of the ftrace tree got a conflict in:
kernel/trace/trace.c
between commit:
86e685ff364394 ("tracing: remove size parameter in __trace_puts()")
from the mm-nonmm-stable tree and commits:
27931ee8f45415 ("tracing: Move trace_printk functions out of trace.c and into trace_printk.c")
0e730bc067e7a7 ("tracing: Move __trace_buffer_{un}lock_*() functions to trace.h")
from the ftrace tree.
I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
diff --combined kernel/trace/trace.c
index b1cb30a7b83d3a,845b8a165daf3c..00000000000000
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@ -67,7 -67,7 +67,7 @@@
* insertions into the ring-buffer such as trace_printk could occurred
* at the same time, giving false positive or negative results.
*/
- static bool __read_mostly tracing_selftest_running;
+ bool __read_mostly tracing_selftest_running;
/*
* If boot-time tracing including tracers/events via kernel cmdline
@@@ -83,7 -83,6 +83,6 @@@ void __init disable_tracing_selftest(co
}
}
#else
- #define tracing_selftest_running 0
#define tracing_selftest_disabled 0
#endif
@@@ -114,7 -113,7 +113,7 @@@ DEFINE_PER_CPU(bool, trace_taskinfo_sav
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
- static int tracing_disabled = 1;
+ int tracing_disabled = 1;
cpumask_var_t __read_mostly tracing_buffer_mask;
@@@ -535,22 -534,11 +534,11 @@@ static struct trace_array global_trace
.trace_flags = TRACE_DEFAULT_FLAGS,
};
- static struct trace_array *printk_trace = &global_trace;
+ struct trace_array *printk_trace = &global_trace;
/* List of trace_arrays interested in the top level trace_marker */
static LIST_HEAD(marker_copies);
- static __always_inline bool printk_binsafe(struct trace_array *tr)
- {
- /*
- * The binary format of traceprintk can cause a crash if used
- * by a buffer from another boot. Force the use of the
- * non binary version of trace_printk if the trace_printk
- * buffer is a boot mapped ring buffer.
- */
- return !(tr->flags & TRACE_ARRAY_FL_BOOT);
- }
-
static void update_printk_trace(struct trace_array *tr)
{
if (printk_trace == tr)
@@@ -649,248 -637,6 +637,6 @@@ int tracing_check_open_get_tr(struct tr
return 0;
}
- /**
- * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
- * @filtered_pids: The list of pids to check
- * @search_pid: The PID to find in @filtered_pids
- *
- * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
- */
- bool
- trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
- {
- return trace_pid_list_is_set(filtered_pids, search_pid);
- }
-
- /**
- * trace_ignore_this_task - should a task be ignored for tracing
- * @filtered_pids: The list of pids to check
- * @filtered_no_pids: The list of pids not to be traced
- * @task: The task that should be ignored if not filtered
- *
- * Checks if @task should be traced or not from @filtered_pids.
- * Returns true if @task should *NOT* be traced.
- * Returns false if @task should be traced.
- */
- bool
- trace_ignore_this_task(struct trace_pid_list *filtered_pids,
- struct trace_pid_list *filtered_no_pids,
- struct task_struct *task)
- {
- /*
- * If filtered_no_pids is not empty, and the task's pid is listed
- * in filtered_no_pids, then return true.
- * Otherwise, if filtered_pids is empty, that means we can
- * trace all tasks. If it has content, then only trace pids
- * within filtered_pids.
- */
-
- return (filtered_pids &&
- !trace_find_filtered_pid(filtered_pids, task->pid)) ||
- (filtered_no_pids &&
- trace_find_filtered_pid(filtered_no_pids, task->pid));
- }
-
- /**
- * trace_filter_add_remove_task - Add or remove a task from a pid_list
- * @pid_list: The list to modify
- * @self: The current task for fork or NULL for exit
- * @task: The task to add or remove
- *
- * If adding a task, if @self is defined, the task is only added if @self
- * is also included in @pid_list. This happens on fork and tasks should
- * only be added when the parent is listed. If @self is NULL, then the
- * @task pid will be removed from the list, which would happen on exit
- * of a task.
- */
- void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
- struct task_struct *self,
- struct task_struct *task)
- {
- if (!pid_list)
- return;
-
- /* For forks, we only add if the forking task is listed */
- if (self) {
- if (!trace_find_filtered_pid(pid_list, self->pid))
- return;
- }
-
- /* "self" is set for forks, and NULL for exits */
- if (self)
- trace_pid_list_set(pid_list, task->pid);
- else
- trace_pid_list_clear(pid_list, task->pid);
- }
-
- /**
- * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
- * @pid_list: The pid list to show
- * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
- * @pos: The position of the file
- *
- * This is used by the seq_file "next" operation to iterate the pids
- * listed in a trace_pid_list structure.
- *
- * Returns the pid+1 as we want to display pid of zero, but NULL would
- * stop the iteration.
- */
- void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
- {
- long pid = (unsigned long)v;
- unsigned int next;
-
- (*pos)++;
-
- /* pid already is +1 of the actual previous bit */
- if (trace_pid_list_next(pid_list, pid, &next) < 0)
- return NULL;
-
- pid = next;
-
- /* Return pid + 1 to allow zero to be represented */
- return (void *)(pid + 1);
- }
-
- /**
- * trace_pid_start - Used for seq_file to start reading pid lists
- * @pid_list: The pid list to show
- * @pos: The position of the file
- *
- * This is used by seq_file "start" operation to start the iteration
- * of listing pids.
- *
- * Returns the pid+1 as we want to display pid of zero, but NULL would
- * stop the iteration.
- */
- void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
- {
- unsigned long pid;
- unsigned int first;
- loff_t l = 0;
-
- if (trace_pid_list_first(pid_list, &first) < 0)
- return NULL;
-
- pid = first;
-
- /* Return pid + 1 so that zero can be the exit value */
- for (pid++; pid && l < *pos;
- pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
- ;
- return (void *)pid;
- }
-
- /**
- * trace_pid_show - show the current pid in seq_file processing
- * @m: The seq_file structure to write into
- * @v: A void pointer of the pid (+1) value to display
- *
- * Can be directly used by seq_file operations to display the current
- * pid value.
- */
- int trace_pid_show(struct seq_file *m, void *v)
- {
- unsigned long pid = (unsigned long)v - 1;
-
- seq_printf(m, "%lu\n", pid);
- return 0;
- }
-
- /* 128 should be much more than enough */
- #define PID_BUF_SIZE 127
-
- int trace_pid_write(struct trace_pid_list *filtered_pids,
- struct trace_pid_list **new_pid_list,
- const char __user *ubuf, size_t cnt)
- {
- struct trace_pid_list *pid_list;
- struct trace_parser parser;
- unsigned long val;
- int nr_pids = 0;
- ssize_t read = 0;
- ssize_t ret;
- loff_t pos;
- pid_t pid;
-
- if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
- return -ENOMEM;
-
- /*
- * Always recreate a new array. The write is an all or nothing
- * operation. Always create a new array when adding new pids by
- * the user. If the operation fails, then the current list is
- * not modified.
- */
- pid_list = trace_pid_list_alloc();
- if (!pid_list) {
- trace_parser_put(&parser);
- return -ENOMEM;
- }
-
- if (filtered_pids) {
- /* copy the current bits to the new max */
- ret = trace_pid_list_first(filtered_pids, &pid);
- while (!ret) {
- ret = trace_pid_list_set(pid_list, pid);
- if (ret < 0)
- goto out;
-
- ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
- nr_pids++;
- }
- }
-
- ret = 0;
- while (cnt > 0) {
-
- pos = 0;
-
- ret = trace_get_user(&parser, ubuf, cnt, &pos);
- if (ret < 0)
- break;
-
- read += ret;
- ubuf += ret;
- cnt -= ret;
-
- if (!trace_parser_loaded(&parser))
- break;
-
- ret = -EINVAL;
- if (kstrtoul(parser.buffer, 0, &val))
- break;
-
- pid = (pid_t)val;
-
- if (trace_pid_list_set(pid_list, pid) < 0) {
- ret = -1;
- break;
- }
- nr_pids++;
-
- trace_parser_clear(&parser);
- ret = 0;
- }
- out:
- trace_parser_put(&parser);
-
- if (ret < 0) {
- trace_pid_list_free(pid_list);
- return ret;
- }
-
- if (!nr_pids) {
- /* Cleared the list of pids */
- trace_pid_list_free(pid_list);
- pid_list = NULL;
- }
-
- *new_pid_list = pid_list;
-
- return read;
- }
-
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
u64 ts;
@@@ -1033,56 -779,6 +779,6 @@@ static inline void trace_access_lock_in
#endif
- #ifdef CONFIG_STACKTRACE
- static void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs);
- static inline void ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs);
-
- #else
- static inline void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
- {
- }
- static inline void ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned long trace_ctx,
- int skip, struct pt_regs *regs)
- {
- }
-
- #endif
-
- static __always_inline void
- trace_event_setup(struct ring_buffer_event *event,
- int type, unsigned int trace_ctx)
- {
- struct trace_entry *ent = ring_buffer_event_data(event);
-
- tracing_generic_entry_update(ent, type, trace_ctx);
- }
-
- static __always_inline struct ring_buffer_event *
- __trace_buffer_lock_reserve(struct trace_buffer *buffer,
- int type,
- unsigned long len,
- unsigned int trace_ctx)
- {
- struct ring_buffer_event *event;
-
- event = ring_buffer_lock_reserve(buffer, len);
- if (event != NULL)
- trace_event_setup(event, type, trace_ctx);
-
- return event;
- }
-
void tracer_tracing_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
@@@ -1110,129 -806,10 +806,10 @@@ void tracing_on(void
}
EXPORT_SYMBOL_GPL(tracing_on);
-
- static __always_inline void
- __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
- {
- __this_cpu_write(trace_taskinfo_save, true);
-
- /* If this is the temp buffer, we need to commit fully */
- if (this_cpu_read(trace_buffered_event) == event) {
- /* Length is in event->array[0] */
- ring_buffer_write(buffer, event->array[0], &event->array[1]);
- /* Release the temp buffer */
- this_cpu_dec(trace_buffered_event_cnt);
- /* ring_buffer_unlock_commit() enables preemption */
- preempt_enable_notrace();
- } else
- ring_buffer_unlock_commit(buffer);
- }
-
- int __trace_array_puts(struct trace_array *tr, unsigned long ip,
- const char *str, int size)
- {
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct print_entry *entry;
- unsigned int trace_ctx;
- int alloc;
-
- if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- if (unlikely(tracing_selftest_running && tr == &global_trace))
- return 0;
-
- if (unlikely(tracing_disabled))
- return 0;
-
- alloc = sizeof(*entry) + size + 2; /* possible \n added */
-
- trace_ctx = tracing_gen_ctx();
- buffer = tr->array_buffer.buffer;
- guard(ring_buffer_nest)(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
- trace_ctx);
- if (!event)
- return 0;
-
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
-
- memcpy(&entry->buf, str, size);
-
- /* Add a newline if necessary */
- if (entry->buf[size - 1] != '\n') {
- entry->buf[size] = '\n';
- entry->buf[size + 1] = '\0';
- } else
- entry->buf[size] = '\0';
-
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
- return size;
- }
- EXPORT_SYMBOL_GPL(__trace_array_puts);
-
- /**
- * __trace_puts - write a constant string into the trace buffer.
- * @ip: The address of the caller
- * @str: The constant string to write
- */
- int __trace_puts(unsigned long ip, const char *str)
- {
- return __trace_array_puts(printk_trace, ip, str, strlen(str));
- }
- EXPORT_SYMBOL_GPL(__trace_puts);
-
- /**
- * __trace_bputs - write the pointer to a constant string into trace buffer
- * @ip: The address of the caller
- * @str: The constant string to write to the buffer to
- */
- int __trace_bputs(unsigned long ip, const char *str)
- {
- struct trace_array *tr = READ_ONCE(printk_trace);
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct bputs_entry *entry;
- unsigned int trace_ctx;
- int size = sizeof(struct bputs_entry);
-
- if (!printk_binsafe(tr))
- return __trace_puts(ip, str);
-
- if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- if (unlikely(tracing_selftest_running || tracing_disabled))
- return 0;
-
- trace_ctx = tracing_gen_ctx();
- buffer = tr->array_buffer.buffer;
-
- guard(ring_buffer_nest)(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
- trace_ctx);
- if (!event)
- return 0;
-
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
- entry->str = str;
-
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
-
- return 1;
- }
- EXPORT_SYMBOL_GPL(__trace_bputs);
-
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
void *cond_data)
{
- struct tracer *tracer = tr->current_trace;
unsigned long flags;
if (in_nmi()) {
@@@ -1248,15 -825,15 +825,15 @@@
return;
}
- /* Note, snapshot can not be used when the tracer uses it */
- if (tracer->use_max_tr) {
- trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
+ if (tr->mapped) {
+ trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
return;
}
- if (tr->mapped) {
- trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
+ /* Note, snapshot can not be used when the tracer uses it */
+ if (tracer_uses_snapshot(tr->current_trace)) {
+ trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
return;
}
@@@ -1356,12 -933,12 +933,12 @@@ int tracing_alloc_snapshot_instance(str
/* Make the snapshot buffer have the same order as main buffer */
order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
- ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+ ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order);
if (ret < 0)
return ret;
/* allocate spare buffer */
- ret = resize_buffer_duplicate_size(&tr->max_buffer,
+ ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
&tr->array_buffer, RING_BUFFER_ALL_CPUS);
if (ret < 0)
return ret;
@@@ -1379,10 -956,10 +956,10 @@@ static void free_snapshot(struct trace_
* The max_tr ring buffer has some state (e.g. ring->clock) and
* we want preserve it.
*/
- ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
- ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
- set_buffer_entries(&tr->max_buffer, 1);
- tracing_reset_online_cpus(&tr->max_buffer);
+ ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, 0);
+ ring_buffer_resize(tr->snapshot_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+ set_buffer_entries(&tr->snapshot_buffer, 1);
+ tracing_reset_online_cpus(&tr->snapshot_buffer);
tr->allocated_snapshot = false;
}
@@@ -1498,7 -1075,7 +1075,7 @@@ int tracing_snapshot_cond_enable(struc
guard(mutex)(&trace_types_lock);
- if (tr->current_trace->use_max_tr)
+ if (tracer_uses_snapshot(tr->current_trace))
return -EBUSY;
/*
@@@ -1665,9 -1242,18 +1242,18 @@@ EXPORT_SYMBOL_GPL(tracing_off)
void disable_trace_on_warning(void)
{
if (__disable_trace_on_warning) {
+ struct trace_array *tr = READ_ONCE(printk_trace);
+
trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
"Disabling tracing due to warning\n");
tracing_off();
+
+ /* Disable trace_printk() buffer too */
+ if (tr != &global_trace) {
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+ "Disabling tracing due to warning\n");
+ tracer_tracing_off(tr);
+ }
}
}
@@@ -1902,10 -1488,7 +1488,7 @@@ static ssize_t trace_seq_to_buffer(stru
unsigned long __read_mostly tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
- static const struct file_operations tracing_max_lat_fops;
-
#ifdef LATENCY_FS_NOTIFY
-
static struct workqueue_struct *fsnotify_wq;
static void latency_fsnotify_workfn(struct work_struct *work)
@@@ -1922,17 -1505,6 +1505,6 @@@ static void latency_fsnotify_workfn_irq
queue_work(fsnotify_wq, &tr->fsnotify_work);
}
- static void trace_create_maxlat_file(struct trace_array *tr,
- struct dentry *d_tracer)
- {
- INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
- init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
- tr->d_max_latency = trace_create_file("tracing_max_latency",
- TRACE_MODE_WRITE,
- d_tracer, tr,
- &tracing_max_lat_fops);
- }
-
__init static int latency_fsnotify_init(void)
{
fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
@@@ -1957,14 -1529,22 +1529,22 @@@ void latency_fsnotify(struct trace_arra
*/
irq_work_queue(&tr->fsnotify_irqwork);
}
+ #endif /* !LATENCY_FS_NOTIFY */
- #else /* !LATENCY_FS_NOTIFY */
-
- #define trace_create_maxlat_file(tr, d_tracer) \
- trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
- d_tracer, tr, &tracing_max_lat_fops)
+ static const struct file_operations tracing_max_lat_fops;
+ static void trace_create_maxlat_file(struct trace_array *tr,
+ struct dentry *d_tracer)
+ {
+ #ifdef LATENCY_FS_NOTIFY
+ INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
+ init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
#endif
+ tr->d_max_latency = trace_create_file("tracing_max_latency",
+ TRACE_MODE_WRITE,
+ d_tracer, tr,
+ &tracing_max_lat_fops);
+ }
/*
* Copy the new maximum trace into the separate maximum-trace
@@@ -1975,8 -1555,8 +1555,8 @@@ static voi
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct array_buffer *trace_buf = &tr->array_buffer;
- struct array_buffer *max_buf = &tr->max_buffer;
struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
+ struct array_buffer *max_buf = &tr->snapshot_buffer;
struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
max_buf->cpu = cpu;
@@@ -2005,7 -1585,14 +1585,14 @@@
tracing_record_cmdline(tsk);
latency_fsnotify(tr);
}
+ #else
+ static inline void trace_create_maxlat_file(struct trace_array *tr,
+ struct dentry *d_tracer) { }
+ static inline void __update_max_tr(struct trace_array *tr,
+ struct task_struct *tsk, int cpu) { }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+ #ifdef CONFIG_TRACER_SNAPSHOT
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
@@@ -2035,17 -1622,16 +1622,16 @@@ update_max_tr(struct trace_array *tr, s
/* Inherit the recordable setting from array_buffer */
if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
- ring_buffer_record_on(tr->max_buffer.buffer);
+ ring_buffer_record_on(tr->snapshot_buffer.buffer);
else
- ring_buffer_record_off(tr->max_buffer.buffer);
+ ring_buffer_record_off(tr->snapshot_buffer.buffer);
- #ifdef CONFIG_TRACER_SNAPSHOT
if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
arch_spin_unlock(&tr->max_lock);
return;
}
- #endif
- swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
+
+ swap(tr->array_buffer.buffer, tr->snapshot_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
@@@ -2080,7 -1666,7 +1666,7 @@@ update_max_tr_single(struct trace_arra
arch_spin_lock(&tr->max_lock);
- ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
+ ret = ring_buffer_swap_cpu(tr->snapshot_buffer.buffer, tr->array_buffer.buffer, cpu);
if (ret == -EBUSY) {
/*
@@@ -2090,7 -1676,7 +1676,7 @@@
* and flag that it failed.
* Another reason is resize is in progress.
*/
- trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
+ trace_array_printk_buf(tr->snapshot_buffer.buffer, _THIS_IP_,
"Failed to swap buffers due to commit or resize in progress\n");
}
@@@ -2099,8 -1685,7 +1685,7 @@@
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&tr->max_lock);
}
-
- #endif /* CONFIG_TRACER_MAX_TRACE */
+ #endif /* CONFIG_TRACER_SNAPSHOT */
struct pipe_wait {
struct trace_iterator *iter;
@@@ -2133,13 -1718,13 +1718,13 @@@ static int wait_on_pipe(struct trace_it
ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
wait_pipe_cond, &pwait);
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
/*
* Make sure this is still the snapshot buffer, as if a snapshot were
* to happen, this would now be the main buffer.
*/
if (iter->snapshot)
- iter->array_buffer = &iter->tr->max_buffer;
+ iter->array_buffer = &iter->tr->snapshot_buffer;
#endif
return ret;
}
@@@ -2204,10 -1789,10 +1789,10 @@@ static int run_tracer_selftest(struct t
tr->current_trace_flags = type->flags ? : type->default_flags;
#ifdef CONFIG_TRACER_MAX_TRACE
- if (type->use_max_tr) {
+ if (tracer_uses_snapshot(type)) {
/* If we expanded the buffers, make sure the max is expanded too */
if (tr->ring_buffer_expanded)
- ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
+ ring_buffer_resize(tr->snapshot_buffer.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
tr->allocated_snapshot = true;
}
@@@ -2229,12 -1814,12 +1814,12 @@@
tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- if (type->use_max_tr) {
+ if (tracer_uses_snapshot(type)) {
tr->allocated_snapshot = false;
/* Shrink the max buffer again */
if (tr->ring_buffer_expanded)
- ring_buffer_resize(tr->max_buffer.buffer, 1,
+ ring_buffer_resize(tr->snapshot_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS);
}
#endif
@@@ -2476,8 -2061,8 +2061,8 @@@ void tracing_reset_all_online_cpus_unlo
continue;
tr->clear_trace = false;
tracing_reset_online_cpus(&tr->array_buffer);
- #ifdef CONFIG_TRACER_MAX_TRACE
- tracing_reset_online_cpus(&tr->max_buffer);
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ tracing_reset_online_cpus(&tr->snapshot_buffer);
#endif
}
}
@@@ -2516,8 -2101,8 +2101,8 @@@ static void tracing_start_tr(struct tra
if (buffer)
ring_buffer_record_enable(buffer);
- #ifdef CONFIG_TRACER_MAX_TRACE
- buffer = tr->max_buffer.buffer;
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ buffer = tr->snapshot_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#endif
@@@ -2552,8 -2137,8 +2137,8 @@@ static void tracing_stop_tr(struct trac
if (buffer)
ring_buffer_record_disable(buffer);
- #ifdef CONFIG_TRACER_MAX_TRACE
- buffer = tr->max_buffer.buffer;
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ buffer = tr->snapshot_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#endif
@@@ -3001,10 -2586,10 +2586,10 @@@ struct ftrace_stacks
static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
- static void __ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
+ void __ftrace_trace_stack(struct trace_array *tr,
+ struct trace_buffer *buffer,
+ unsigned int trace_ctx,
+ int skip, struct pt_regs *regs)
{
struct ring_buffer_event *event;
unsigned int size, nr_entries;
@@@ -3087,17 -2672,6 +2672,6 @@@
trace_clear_recursion(bit);
}
- static inline void ftrace_trace_stack(struct trace_array *tr,
- struct trace_buffer *buffer,
- unsigned int trace_ctx,
- int skip, struct pt_regs *regs)
- {
- if (!(tr->trace_flags & TRACE_ITER(STACKTRACE)))
- return;
-
- __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
- }
-
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
int skip)
{
@@@ -3232,324 -2806,6 +2806,6 @@@ void trace_last_func_repeats(struct tra
__buffer_unlock_commit(buffer, event);
}
- /* created for use with alloc_percpu */
- struct trace_buffer_struct {
- int nesting;
- char buffer[4][TRACE_BUF_SIZE];
- };
-
- static struct trace_buffer_struct __percpu *trace_percpu_buffer;
-
- /*
- * This allows for lockless recording. If we're nested too deeply, then
- * this returns NULL.
- */
- static char *get_trace_buf(void)
- {
- struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
-
- if (!trace_percpu_buffer || buffer->nesting >= 4)
- return NULL;
-
- buffer->nesting++;
-
- /* Interrupts must see nesting incremented before we use the buffer */
- barrier();
- return &buffer->buffer[buffer->nesting - 1][0];
- }
-
- static void put_trace_buf(void)
- {
- /* Don't let the decrement of nesting leak before this */
- barrier();
- this_cpu_dec(trace_percpu_buffer->nesting);
- }
-
- static int alloc_percpu_trace_buffer(void)
- {
- struct trace_buffer_struct __percpu *buffers;
-
- if (trace_percpu_buffer)
- return 0;
-
- buffers = alloc_percpu(struct trace_buffer_struct);
- if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
- return -ENOMEM;
-
- trace_percpu_buffer = buffers;
- return 0;
- }
-
- static int buffers_allocated;
-
- void trace_printk_init_buffers(void)
- {
- if (buffers_allocated)
- return;
-
- if (alloc_percpu_trace_buffer())
- return;
-
- /* trace_printk() is for debug use only. Don't use it in production. */
-
- pr_warn("\n");
- pr_warn("**********************************************************\n");
- pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
- pr_warn("** **\n");
- pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
- pr_warn("** **\n");
- pr_warn("** This means that this is a DEBUG kernel and it is **\n");
- pr_warn("** unsafe for production use. **\n");
- pr_warn("** **\n");
- pr_warn("** If you see this message and you are not debugging **\n");
- pr_warn("** the kernel, report this immediately to your vendor! **\n");
- pr_warn("** **\n");
- pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
- pr_warn("**********************************************************\n");
-
- /* Expand the buffers to set size */
- tracing_update_buffers(&global_trace);
-
- buffers_allocated = 1;
-
- /*
- * trace_printk_init_buffers() can be called by modules.
- * If that happens, then we need to start cmdline recording
- * directly here. If the global_trace.buffer is already
- * allocated here, then this was called by module code.
- */
- if (global_trace.array_buffer.buffer)
- tracing_start_cmdline_record();
- }
- EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
-
- void trace_printk_start_comm(void)
- {
- /* Start tracing comms if trace printk is set */
- if (!buffers_allocated)
- return;
- tracing_start_cmdline_record();
- }
-
- static void trace_printk_start_stop_comm(int enabled)
- {
- if (!buffers_allocated)
- return;
-
- if (enabled)
- tracing_start_cmdline_record();
- else
- tracing_stop_cmdline_record();
- }
-
- /**
- * trace_vbprintk - write binary msg to tracing buffer
- * @ip: The address of the caller
- * @fmt: The string format to write to the buffer
- * @args: Arguments for @fmt
- */
- int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
- {
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct trace_array *tr = READ_ONCE(printk_trace);
- struct bprint_entry *entry;
- unsigned int trace_ctx;
- char *tbuffer;
- int len = 0, size;
-
- if (!printk_binsafe(tr))
- return trace_vprintk(ip, fmt, args);
-
- if (unlikely(tracing_selftest_running || tracing_disabled))
- return 0;
-
- /* Don't pollute graph traces with trace_vprintk internals */
- pause_graph_tracing();
-
- trace_ctx = tracing_gen_ctx();
- guard(preempt_notrace)();
-
- tbuffer = get_trace_buf();
- if (!tbuffer) {
- len = 0;
- goto out_nobuffer;
- }
-
- len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
-
- if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
- goto out_put;
-
- size = sizeof(*entry) + sizeof(u32) * len;
- buffer = tr->array_buffer.buffer;
- scoped_guard(ring_buffer_nest, buffer) {
- event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
- trace_ctx);
- if (!event)
- goto out_put;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
- entry->fmt = fmt;
-
- memcpy(entry->buf, tbuffer, sizeof(u32) * len);
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
- }
- out_put:
- put_trace_buf();
-
- out_nobuffer:
- unpause_graph_tracing();
-
- return len;
- }
- EXPORT_SYMBOL_GPL(trace_vbprintk);
-
- static __printf(3, 0)
- int __trace_array_vprintk(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, va_list args)
- {
- struct ring_buffer_event *event;
- int len = 0, size;
- struct print_entry *entry;
- unsigned int trace_ctx;
- char *tbuffer;
-
- if (tracing_disabled)
- return 0;
-
- /* Don't pollute graph traces with trace_vprintk internals */
- pause_graph_tracing();
-
- trace_ctx = tracing_gen_ctx();
- guard(preempt_notrace)();
-
-
- tbuffer = get_trace_buf();
- if (!tbuffer) {
- len = 0;
- goto out_nobuffer;
- }
-
- len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
-
- size = sizeof(*entry) + len + 1;
- scoped_guard(ring_buffer_nest, buffer) {
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- trace_ctx);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
-
- memcpy(&entry->buf, tbuffer, len + 1);
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
- }
- out:
- put_trace_buf();
-
- out_nobuffer:
- unpause_graph_tracing();
-
- return len;
- }
-
- int trace_array_vprintk(struct trace_array *tr,
- unsigned long ip, const char *fmt, va_list args)
- {
- if (tracing_selftest_running && tr == &global_trace)
- return 0;
-
- return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
- }
-
- /**
- * trace_array_printk - Print a message to a specific instance
- * @tr: The instance trace_array descriptor
- * @ip: The instruction pointer that this is called from.
- * @fmt: The format to print (printf format)
- *
- * If a subsystem sets up its own instance, they have the right to
- * printk strings into their tracing instance buffer using this
- * function. Note, this function will not write into the top level
- * buffer (use trace_printk() for that), as writing into the top level
- * buffer should only have events that can be individually disabled.
- * trace_printk() is only used for debugging a kernel, and should not
- * be ever incorporated in normal use.
- *
- * trace_array_printk() can be used, as it will not add noise to the
- * top level tracing buffer.
- *
- * Note, trace_array_init_printk() must be called on @tr before this
- * can be used.
- */
- int trace_array_printk(struct trace_array *tr,
- unsigned long ip, const char *fmt, ...)
- {
- int ret;
- va_list ap;
-
- if (!tr)
- return -ENOENT;
-
- /* This is only allowed for created instances */
- if (tr == &global_trace)
- return 0;
-
- if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- va_start(ap, fmt);
- ret = trace_array_vprintk(tr, ip, fmt, ap);
- va_end(ap);
- return ret;
- }
- EXPORT_SYMBOL_GPL(trace_array_printk);
-
- /**
- * trace_array_init_printk - Initialize buffers for trace_array_printk()
- * @tr: The trace array to initialize the buffers for
- *
- * As trace_array_printk() only writes into instances, they are OK to
- * have in the kernel (unlike trace_printk()). This needs to be called
- * before trace_array_printk() can be used on a trace_array.
- */
- int trace_array_init_printk(struct trace_array *tr)
- {
- if (!tr)
- return -ENOENT;
-
- /* This is only allowed for created instances */
- if (tr == &global_trace)
- return -EINVAL;
-
- return alloc_percpu_trace_buffer();
- }
- EXPORT_SYMBOL_GPL(trace_array_init_printk);
-
- int trace_array_printk_buf(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, ...)
- {
- int ret;
- va_list ap;
-
- if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK)))
- return 0;
-
- va_start(ap, fmt);
- ret = __trace_array_vprintk(buffer, ip, fmt, ap);
- va_end(ap);
- return ret;
- }
-
- int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
- {
- return trace_array_vprintk(printk_trace, ip, fmt, args);
- }
- EXPORT_SYMBOL_GPL(trace_vprintk);
-
static void trace_iterator_increment(struct trace_iterator *iter)
{
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
@@@ -3986,10 -3242,8 +3242,8 @@@ static void *s_start(struct seq_file *m
}
mutex_unlock(&trace_types_lock);
- #ifdef CONFIG_TRACER_MAX_TRACE
- if (iter->snapshot && iter->trace->use_max_tr)
+ if (iter->snapshot && tracer_uses_snapshot(iter->trace))
return ERR_PTR(-EBUSY);
- #endif
if (*pos != iter->pos) {
iter->ent = NULL;
@@@ -4028,10 -3282,8 +3282,8 @@@ static void s_stop(struct seq_file *m,
{
struct trace_iterator *iter = m->private;
- #ifdef CONFIG_TRACER_MAX_TRACE
- if (iter->snapshot && iter->trace->use_max_tr)
+ if (iter->snapshot && tracer_uses_snapshot(iter->trace))
return;
- #endif
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
@@@ -4508,7 -3760,7 +3760,7 @@@ static void test_ftrace_alive(struct se
"# MAY BE MISSING FUNCTION EVENTS\n");
}
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
static void show_snapshot_main_help(struct seq_file *m)
{
seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
@@@ -4686,10 -3938,10 +3938,10 @@@ __tracing_open(struct inode *inode, str
iter->tr = tr;
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
/* Currently only the top directory has a snapshot */
if (tr->current_trace->print_max || snapshot)
- iter->array_buffer = &tr->max_buffer;
+ iter->array_buffer = &tr->snapshot_buffer;
else
#endif
iter->array_buffer = &tr->array_buffer;
@@@ -4758,11 -4010,6 +4010,6 @@@ int tracing_open_generic(struct inode *
return 0;
}
- bool tracing_is_disabled(void)
- {
- return (tracing_disabled) ? true: false;
- }
-
/*
* Open and update trace_array ref count.
* Must have the current trace_array passed to it.
@@@ -4897,7 -4144,7 +4144,7 @@@ static int tracing_open(struct inode *i
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->current_trace->print_max)
- trace_buf = &tr->max_buffer;
+ trace_buf = &tr->snapshot_buffer;
#endif
if (cpu == RING_BUFFER_ALL_CPUS)
@@@ -4928,11 -4175,9 +4175,9 @@@
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
- #ifdef CONFIG_TRACER_SNAPSHOT
/* arrays with mapped buffer range do not have snapshots */
- if (tr->range_addr_start && t->use_max_tr)
+ if (tr->range_addr_start && tracer_uses_snapshot(t))
return false;
- #endif
return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}
@@@ -5109,15 -4354,15 +4354,15 @@@ int tracing_set_cpumask(struct trace_ar
if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
- #ifdef CONFIG_TRACER_MAX_TRACE
- ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ ring_buffer_record_disable_cpu(tr->snapshot_buffer.buffer, cpu);
#endif
}
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
- #ifdef CONFIG_TRACER_MAX_TRACE
- ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ ring_buffer_record_enable_cpu(tr->snapshot_buffer.buffer, cpu);
#endif
}
}
@@@ -5326,8 -4571,8 +4571,8 @@@ int set_tracer_flag(struct trace_array
case TRACE_ITER(OVERWRITE):
ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
- #ifdef CONFIG_TRACER_MAX_TRACE
- ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ ring_buffer_change_overwrite(tr->snapshot_buffer.buffer, enabled);
#endif
break;
@@@ -5990,7 -5235,7 +5235,7 @@@ static void update_buffer_entries(struc
}
}
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
struct array_buffer *size_buf, int cpu_id)
@@@ -6016,7 -5261,7 +5261,7 @@@
return ret;
}
- #endif /* CONFIG_TRACER_MAX_TRACE */
+ #endif /* CONFIG_TRACER_SNAPSHOT */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
unsigned long size, int cpu)
@@@ -6041,11 -5286,11 +5286,11 @@@
if (ret < 0)
goto out_start;
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
if (!tr->allocated_snapshot)
goto out;
- ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
+ ret = ring_buffer_resize(tr->snapshot_buffer.buffer, size, cpu);
if (ret < 0) {
int r = resize_buffer_duplicate_size(&tr->array_buffer,
&tr->array_buffer, cpu);
@@@ -6070,10 -5315,10 +5315,10 @@@
goto out_start;
}
- update_buffer_entries(&tr->max_buffer, cpu);
+ update_buffer_entries(&tr->snapshot_buffer, cpu);
out:
- #endif /* CONFIG_TRACER_MAX_TRACE */
+ #endif /* CONFIG_TRACER_SNAPSHOT */
update_buffer_entries(&tr->array_buffer, cpu);
out_start:
@@@ -6264,6 -5509,9 +5509,9 @@@ int tracing_update_buffers(struct trace
{
int ret = 0;
+ if (!tr)
+ tr = &global_trace;
+
guard(mutex)(&trace_types_lock);
update_last_data(tr);
@@@ -6298,9 -5546,7 +5546,7 @@@ int tracing_set_tracer(struct trace_arr
{
struct tracer *trace = NULL;
struct tracers *t;
- #ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr;
- #endif
int ret;
guard(mutex)(&trace_types_lock);
@@@ -6328,7 -5574,7 +5574,7 @@@
return 0;
#ifdef CONFIG_TRACER_SNAPSHOT
- if (trace->use_max_tr) {
+ if (tracer_uses_snapshot(trace)) {
local_irq_disable();
arch_spin_lock(&tr->max_lock);
ret = tr->cond_snapshot ? -EBUSY : 0;
@@@ -6360,14 -5606,13 +5606,13 @@@
if (tr->current_trace->reset)
tr->current_trace->reset(tr);
- #ifdef CONFIG_TRACER_MAX_TRACE
- had_max_tr = tr->current_trace->use_max_tr;
+ had_max_tr = tracer_uses_snapshot(tr->current_trace);
/* Current trace needs to be nop_trace before synchronize_rcu */
tr->current_trace = &nop_trace;
tr->current_trace_flags = nop_trace.flags;
- if (had_max_tr && !trace->use_max_tr) {
+ if (had_max_tr && !tracer_uses_snapshot(trace)) {
/*
* We need to make sure that the update_max_tr sees that
* current_trace changed to nop_trace to keep it from
@@@ -6380,24 -5625,19 +5625,19 @@@
tracing_disarm_snapshot(tr);
}
- if (!had_max_tr && trace->use_max_tr) {
+ if (!had_max_tr && tracer_uses_snapshot(trace)) {
ret = tracing_arm_snapshot_locked(tr);
if (ret)
return ret;
}
- #else
- tr->current_trace = &nop_trace;
- #endif
tr->current_trace_flags = t->flags ? : t->tracer->flags;
if (trace->init) {
ret = tracer_init(trace, tr);
if (ret) {
- #ifdef CONFIG_TRACER_MAX_TRACE
- if (trace->use_max_tr)
+ if (tracer_uses_snapshot(trace))
tracing_disarm_snapshot(tr);
- #endif
tr->current_trace_flags = nop_trace.flags;
return ret;
}
@@@ -7602,7 -6842,7 +6842,7 @@@ tracing_mark_write(struct file *filp, c
unsigned long ip;
char *buf;
- if (tracing_disabled)
+ if (unlikely(tracing_disabled))
return -EINVAL;
if (!(tr->trace_flags & TRACE_ITER(MARKERS)))
@@@ -7682,7 -6922,7 +6922,7 @@@ tracing_mark_raw_write(struct file *fil
ssize_t written = -ENODEV;
char *buf;
- if (tracing_disabled)
+ if (unlikely(tracing_disabled))
return -EINVAL;
if (!(tr->trace_flags & TRACE_ITER(MARKERS)))
@@@ -7783,10 -7023,10 +7023,10 @@@ int tracing_set_clock(struct trace_arra
*/
tracing_reset_online_cpus(&tr->array_buffer);
- #ifdef CONFIG_TRACER_MAX_TRACE
- if (tr->max_buffer.buffer)
- ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
- tracing_reset_online_cpus(&tr->max_buffer);
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ if (tr->snapshot_buffer.buffer)
+ ring_buffer_set_clock(tr->snapshot_buffer.buffer, trace_clocks[i].func);
+ tracing_reset_online_cpus(&tr->snapshot_buffer);
#endif
if (tr->scratch && !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) {
@@@ -7880,26 -7120,6 +7120,6 @@@ u64 tracing_event_time_stamp(struct tra
return ring_buffer_event_time_stamp(buffer, rbe);
}
- /*
- * Set or disable using the per CPU trace_buffer_event when possible.
- */
- int tracing_set_filter_buffering(struct trace_array *tr, bool set)
- {
- guard(mutex)(&trace_types_lock);
-
- if (set && tr->no_filter_buffering_ref++)
- return 0;
-
- if (!set) {
- if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
- return -EINVAL;
-
- --tr->no_filter_buffering_ref;
- }
-
- return 0;
- }
-
struct ftrace_buffer_info {
struct trace_iterator iter;
void *spare;
@@@ -7938,7 -7158,7 +7158,7 @@@ static int tracing_snapshot_open(struc
ret = 0;
iter->tr = tr;
- iter->array_buffer = &tr->max_buffer;
+ iter->array_buffer = &tr->snapshot_buffer;
iter->cpu_file = tracing_get_cpu(inode);
m->private = iter;
file->private_data = m;
@@@ -7975,7 -7195,7 +7195,7 @@@ tracing_snapshot_write(struct file *fil
guard(mutex)(&trace_types_lock);
- if (tr->current_trace->use_max_tr)
+ if (tracer_uses_snapshot(tr->current_trace))
return -EBUSY;
local_irq_disable();
@@@ -8001,7 -7221,7 +7221,7 @@@
return -EINVAL;
#endif
if (tr->allocated_snapshot)
- ret = resize_buffer_duplicate_size(&tr->max_buffer,
+ ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
&tr->array_buffer, iter->cpu_file);
ret = tracing_arm_snapshot_locked(tr);
@@@ -8022,9 -7242,9 +7242,9 @@@
default:
if (tr->allocated_snapshot) {
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
- tracing_reset_online_cpus(&tr->max_buffer);
+ tracing_reset_online_cpus(&tr->snapshot_buffer);
else
- tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
+ tracing_reset_cpu(&tr->snapshot_buffer, iter->cpu_file);
}
break;
}
@@@ -8074,13 -7294,13 +7294,13 @@@ static int snapshot_raw_open(struct ino
info = filp->private_data;
- if (info->iter.trace->use_max_tr) {
+ if (tracer_uses_snapshot(info->iter.trace)) {
tracing_buffers_release(inode, filp);
return -EBUSY;
}
info->iter.snapshot = true;
- info->iter.array_buffer = &info->iter.tr->max_buffer;
+ info->iter.array_buffer = &info->iter.tr->snapshot_buffer;
return ret;
}
@@@ -8630,10 -7850,8 +7850,8 @@@ tracing_buffers_read(struct file *filp
if (!count)
return 0;
- #ifdef CONFIG_TRACER_MAX_TRACE
- if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+ if (iter->snapshot && tracer_uses_snapshot(iter->tr->current_trace))
return -EBUSY;
- #endif
page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
@@@ -8817,10 -8035,8 +8035,8 @@@ tracing_buffers_splice_read(struct fil
int entries, i;
ssize_t ret = 0;
- #ifdef CONFIG_TRACER_MAX_TRACE
- if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+ if (iter->snapshot && tracer_uses_snapshot(iter->tr->current_trace))
return -EBUSY;
- #endif
page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
if (*ppos & (page_size - 1))
@@@ -8954,7 -8170,7 +8170,7 @@@ static long tracing_buffers_ioctl(struc
return 0;
}
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
static int get_snapshot_map(struct trace_array *tr)
{
int err = 0;
@@@ -9958,12 -9174,12 +9174,12 @@@ buffer_subbuf_size_write(struct file *f
if (ret)
goto out;
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
if (!tr->allocated_snapshot)
goto out_max;
- ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+ ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order);
if (ret) {
/* Put back the old order */
cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
@@@ -10179,12 -9395,12 +9395,12 @@@ static int allocate_trace_buffers(struc
if (ret)
return ret;
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
/* Fix mapped buffer trace arrays do not have snapshot buffers */
if (tr->range_addr_start)
return 0;
- ret = allocate_trace_buffer(tr, &tr->max_buffer,
+ ret = allocate_trace_buffer(tr, &tr->snapshot_buffer,
allocate_snapshot ? size : 1);
if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
free_trace_buffer(&tr->array_buffer);
@@@ -10206,8 -9422,8 +9422,8 @@@ static void free_trace_buffers(struct t
free_trace_buffer(&tr->array_buffer);
kfree(tr->module_delta);
- #ifdef CONFIG_TRACER_MAX_TRACE
- free_trace_buffer(&tr->max_buffer);
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ free_trace_buffer(&tr->snapshot_buffer);
#endif
}
@@@ -10348,7 -9564,7 +9564,7 @@@ trace_array_create_systems(const char *
tr->syscall_buf_sz = global_trace.syscall_buf_sz;
tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
spin_lock_init(&tr->snapshot_trigger_lock);
#endif
tr->current_trace = &nop_trace;
@@@ -10673,9 -9889,7 +9889,7 @@@ init_tracer_tracefs(struct trace_array
create_trace_options_dir(tr);
- #ifdef CONFIG_TRACER_MAX_TRACE
trace_create_maxlat_file(tr, d_tracer);
- #endif
if (ftrace_create_function_files(tr, d_tracer))
MEM_FAIL(1, "Could not allocate function filter files");
@@@ -10774,7 -9988,7 +9988,7 @@@ int tracing_init_dentry(void
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];
- static struct workqueue_struct *eval_map_wq __initdata;
+ struct workqueue_struct *trace_init_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;
@@@ -10790,15 -10004,15 +10004,15 @@@ static int __init trace_eval_init(void
{
INIT_WORK(&eval_map_work, eval_map_work_func);
- eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
- if (!eval_map_wq) {
- pr_err("Unable to allocate eval_map_wq\n");
+ trace_init_wq = alloc_workqueue("trace_init_wq", WQ_UNBOUND, 0);
+ if (!trace_init_wq) {
+ pr_err("Unable to allocate trace_init_wq\n");
/* Do work here */
eval_map_work_func(&eval_map_work);
return -ENOMEM;
}
- queue_work(eval_map_wq, &eval_map_work);
+ queue_work(trace_init_wq, &eval_map_work);
return 0;
}
@@@ -10807,8 -10021,8 +10021,8 @@@ subsys_initcall(trace_eval_init)
static int __init trace_eval_sync(void)
{
/* Make sure the eval map updates are finished */
- if (eval_map_wq)
- destroy_workqueue(eval_map_wq);
+ if (trace_init_wq)
+ destroy_workqueue(trace_init_wq);
return 0;
}
@@@ -10969,9 -10183,9 +10183,9 @@@ static __init int tracer_init_tracefs(v
if (ret)
return 0;
- if (eval_map_wq) {
+ if (trace_init_wq) {
INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
- queue_work(eval_map_wq, &tracerfs_init_work);
+ queue_work(trace_init_wq, &tracerfs_init_work);
} else {
tracer_init_tracefs_work_func(NULL);
}
@@@ -11304,7 -10518,7 +10518,7 @@@ ssize_t trace_parse_run_command(struct
return done;
}
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
__init static bool tr_needs_alloc_snapshot(const char *name)
{
char *test;
@@@ -11494,7 -10708,7 +10708,7 @@@ __init static void enable_instances(voi
}
} else {
/* Only non mapped buffers have snapshot buffers */
- if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
+ if (IS_ENABLED(CONFIG_TRACER_SNAPSHOT))
do_allocate_snapshot(name);
}
@@@ -11621,7 -10835,7 +10835,7 @@@ __init static int tracer_alloc_buffers(
global_trace.current_trace_flags = nop_trace.flags;
global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
spin_lock_init(&global_trace.snapshot_trigger_lock);
#endif
ftrace_init_global_array_ops(&global_trace);
@@@ -11689,7 -10903,7 +10903,7 @@@ struct trace_array *trace_get_global_ar
void __init ftrace_boot_snapshot(void)
{
- #ifdef CONFIG_TRACER_MAX_TRACE
+ #ifdef CONFIG_TRACER_SNAPSHOT
struct trace_array *tr;
if (!snapshot_at_boot)
On Mon, 9 Feb 2026 15:43:49 +0000 Mark Brown <broonie@kernel.org> wrote:
> Today's linux-next merge of the ftrace tree got a conflict in:
>
> kernel/trace/trace.c
>
> between commit:
>
> 86e685ff364394 ("tracing: remove size parameter in __trace_puts()")
>
> from the mm-nonmm-stable tree and commits:
>
> 27931ee8f45415 ("tracing: Move trace_printk functions out of trace.c and into trace_printk.c")
> 0e730bc067e7a7 ("tracing: Move __trace_buffer_{un}lock_*() functions to trace.h")
>
> from the ftrace tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
Yikes.
How about I drop "tracing: remove size parameter in __trace_puts()" and
"tracing: move tracing declarations from kernel.h to a dedicated
header"?
On Mon, 9 Feb 2026 08:58:10 -0800 Andrew Morton <akpm@linux-foundation.org> wrote:

> How about I drop "tracing: remove size parameter in __trace_puts()" and
> "tracing: move tracing declarations from kernel.h to a dedicated
> header"?

Andrew,

Don't worry about it. When I submit my pull request I'll mention this
conflict and how to solve it.

-- Steve
On Mon, 9 Feb 2026 08:58:10 -0800 Andrew Morton <akpm@linux-foundation.org> wrote:

> > I fixed it up (see below) and can carry the fix as necessary. This
> > is now fixed as far as linux-next is concerned, but any non-trivial
> > conflicts should be mentioned to your upstream maintainer when your tree
> > is submitted for merging. You may also want to consider cooperating
> > with the maintainer of the conflicting tree to minimise any particularly
> > complex conflicts.
>
> Yikes.
>
> How about I drop "tracing: remove size parameter in __trace_puts()" and
> "tracing: move tracing declarations from kernel.h to a dedicated
> header"?

It should be a trivial merge conflict. I only moved the code from trace.c
into trace_printk.c. I made no other changes to it. Basically, the commit
in your tree would be the same, it would just be applied to
kernel/trace/trace_printk.c instead of kernel/trace/trace.c.

Both changes are similar in that they are both cleanups of where code lives.

Or would you prefer if I take that change and rebase it on top of my tree?

-- Steve
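For reference, the resolution amounts to applying the mm-tree change to the
relocated copy of the function. The merged result in
kernel/trace/trace_printk.c would presumably end up reading (a sketch,
mirroring the lines removed from trace.c in the diff above):

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 */
int __trace_puts(unsigned long ip, const char *str)
{
	/* the size parameter is gone; the length is computed here instead */
	return __trace_array_puts(printk_trace, ip, str, strlen(str));
}
EXPORT_SYMBOL_GPL(__trace_puts);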
On Mon, Feb 09, 2026 at 08:58:10AM -0800, Andrew Morton wrote:
> On Mon, 9 Feb 2026 15:43:49 +0000 Mark Brown <broonie@kernel.org> wrote:
> > between commit:
> >
> > 86e685ff364394 ("tracing: remove size parameter in __trace_puts()")
> >
> > from the mm-nonmm-stable tree and commits:
> >
> > 27931ee8f45415 ("tracing: Move trace_printk functions out of trace.c and into trace_printk.c")
> > 0e730bc067e7a7 ("tracing: Move __trace_buffer_{un}lock_*() functions to trace.h")
> >
> > from the ftrace tree.
> Yikes.
> How about I drop "tracing: remove size parameter in __trace_puts()" and
> "tracing: move tracing declarations from kernel.h to a dedicated
> header"?
FWIW that diffstat looked a lot worse than it was since it was one of
these merges that git shows as an empty diff and I used diff -c instead which
usually looks more alarming - the change was trivial code motion
deleting code in the ftrace tree with the actual fixup being very small
but in a different file. Was a bit weird that the parameter removal
didn't go via the ftrace tree though.
On Mon, 9 Feb 2026 17:07:18 +0000
Mark Brown <broonie@kernel.org> wrote:
> FWIW that diffstat looked a lot worse than it was since it was one of
> these merges that git shows as an empty diff and I used diff -c instead which
> usually looks more alarming - the change was trivial code motion
> deleting code in the ftrace tree with the actual fixup being very small
> but in a different file. Was a bit weird that the parameter removal
> didn't go via the ftrace tree though.
If you look at the change log of the commit in the mm tree, you'll see why
it went there:
  This fixes a header recursion issue where using strlen() in the macro
  calling __trace_puts() requires adding #include <linux/string.h> in order
  to use strlen(). Removing the use of strlen() from the header fixes the
  recursion issue.
The mm tree took a patch to move things around, and moving the
trace_printk() declarations into their own header caused the build to fail!
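Roughly, the problem was that the header macro computed the length at the
call site (a simplified sketch, not the exact header text):

	#define trace_puts(str)	__trace_puts(_THIS_IP_, str, strlen(str))

which forced the header to include <linux/string.h> for strlen(). With the
size parameter removed the macro only passes the string:

	#define trace_puts(str)	__trace_puts(_THIS_IP_, str)

and __trace_puts() calls strlen() itself, as seen in the diff above.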
I guess I could have taken the patch moving the trace_printk() code
through my tree. But it was part of a series that did other moves, so I
didn't recommend it.
-- Steve