From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
There's no reason to impose an arbitrary limit on the size of a raw trace
marker. Just let it be as big as the ring buffer itself allows.

There's also no reason to artificially truncate the write to
TRACE_BUF_SIZE, as that's not even used here.
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
[
Depends on: https://lore.kernel.org/linux-trace-kernel/20231209175003.63db40ab@gandalf.local.home/
]
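
A minimal user-space sketch, for illustration only (not part of the patch):
it writes a raw marker payload larger than the old 3K cap through the
tracefs trace_marker_raw file. The tag value and payload size are made up;
the only requirements visible in the code are that the write starts with an
unsigned int tag id and that the resulting event fits in the ring buffer
(the exact maximum depends on the sub-buffer size, typically a bit under
one page by default).

/* Hypothetical example: raw marker write larger than the old 3K limit. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[4000];        /* > 3K, still fits a default 4K sub-buffer */
        unsigned int tag = 0x1234;      /* made-up tag id; first 4 bytes of the write */
        int fd;

        memcpy(buf, &tag, sizeof(tag));
        memset(buf + sizeof(tag), 0xaa, sizeof(buf) - sizeof(tag));

        fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * Before this patch the write fails with -EINVAL (over 3K).
         * With it, the payload is recorded in one piece as long as the
         * event fits in the ring buffer; anything larger is rejected.
         */
        if (write(fd, buf, sizeof(buf)) < 0)
                perror("write");

        close(fd);
        return 0;
}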
kernel/trace/trace.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index da837119a446..077b20e83e7c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7351,9 +7351,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
return written;
}
-/* Limit it for now to 3K (including tag) */
-#define RAW_DATA_MAX_SIZE (1024*3)
-
static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
@@ -7375,18 +7372,16 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
                 return -EINVAL;
 
         /* The marker must at least have a tag id */
-        if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
+        if (cnt < sizeof(unsigned int))
                 return -EINVAL;
 
-        if (cnt > TRACE_BUF_SIZE)
-                cnt = TRACE_BUF_SIZE;
-
-        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
-
         size = sizeof(*entry) + cnt;
         if (cnt < FAULT_SIZE_ID)
                 size += FAULT_SIZE_ID - cnt;
 
         buffer = tr->array_buffer.buffer;
+        if (size > ring_buffer_max_event_size(buffer))
+                return -EINVAL;
+
         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
                                             tracing_gen_ctx());
--
2.42.0
On Sat, 9 Dec 2023 17:57:16 -0500 Steven Rostedt <rostedt@goodmis.org> wrote:

> From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
>
> There's no reason to impose an arbitrary limit on the size of a raw trace
> marker. Just let it be as big as the ring buffer itself allows.
>
> There's also no reason to artificially truncate the write to
> TRACE_BUF_SIZE, as that's not even used here.

Looks good to me.

Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

Thanks!

>
> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
> ---
> [
>    Depends on: https://lore.kernel.org/linux-trace-kernel/20231209175003.63db40ab@gandalf.local.home/
> ]
>  kernel/trace/trace.c | 13 ++++---------
>  1 file changed, 4 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index da837119a446..077b20e83e7c 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -7351,9 +7348,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
>          return written;
>  }
>  
> -/* Limit it for now to 3K (including tag) */
> -#define RAW_DATA_MAX_SIZE (1024*3)
> -
>  static ssize_t
>  tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
>                         size_t cnt, loff_t *fpos)
> @@ -7375,18 +7372,16 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
>                  return -EINVAL;
>  
>          /* The marker must at least have a tag id */
> -        if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
> +        if (cnt < sizeof(unsigned int))
>                  return -EINVAL;
>  
> -        if (cnt > TRACE_BUF_SIZE)
> -                cnt = TRACE_BUF_SIZE;
> -
> -        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
> -
>          size = sizeof(*entry) + cnt;
>          if (cnt < FAULT_SIZE_ID)
>                  size += FAULT_SIZE_ID - cnt;
>  
>          buffer = tr->array_buffer.buffer;
> +        if (size > ring_buffer_max_event_size(buffer))
> +                return -EINVAL;
> +
>          event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
>                                              tracing_gen_ctx());
> --
> 2.42.0
>

--
Masami Hiramatsu (Google) <mhiramat@kernel.org>