From: Steven Rostedt <rostedt@goodmis.org>
rb_check_pages() scans the ring buffer sub buffers to make sure they
are valid after an update. But locks are released during the update so that
preemption is not disabled for too long.
ring_buffer_subbuf_order_set() updates the counter used by
rb_check_pages() without taking any locks, and it also updates the pages.
Even though the buffer->mutex is likely enough to protect this,
rb_check_pages() uses the cpu_buffer->reader_lock for synchronization,
so take that lock as well when updating the pages and the counter in
ring_buffer_subbuf_order_set().
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
Note, this is based on top of:
https://lore.kernel.org/linux-trace-kernel/20240715145141.5528-1-petr.pavlu@suse.com/
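
As an aside, the locking pattern described above can be illustrated with a
small user-space sketch. This is not the ring buffer code: the pthread mutex
stands in for cpu_buffer->reader_lock, and the names (check_pages(),
replace_pages(), list_cnt) are made up for the example. The checker drops the
lock between entries so it never holds it for long, and snapshots a
modification counter so it can bail out if the list changes underneath it;
the writer must therefore relink the list and bump the counter while holding
the same lock, which is what the hunk below does with reader_lock and
cpu_buffer->cnt (a real checker would also re-validate the current node each
time it retakes the lock; that is simplified away here).

/*
 * Toy user-space sketch of the check/update pattern (NOT kernel code).
 * A pthread mutex stands in for cpu_buffer->reader_lock.
 */
#include <pthread.h>
#include <stdio.h>

struct page_node {
	struct page_node *next;
	struct page_node *prev;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page_node *pages;		/* circular list of "sub buffers" */
static unsigned long list_cnt;		/* bumped on every structural change */

/* Checker: verify next/prev consistency without holding the lock throughout. */
static int check_pages(void)
{
	struct page_node *head, *node;
	unsigned long cnt;

	pthread_mutex_lock(&list_lock);
	cnt = list_cnt;			/* snapshot the modification counter */
	head = pages;
	pthread_mutex_unlock(&list_lock);

	node = head;
	do {
		pthread_mutex_lock(&list_lock);
		if (cnt != list_cnt) {	/* list was updated under us: bail out */
			pthread_mutex_unlock(&list_lock);
			return -1;
		}
		if (node->next->prev != node || node->prev->next != node) {
			pthread_mutex_unlock(&list_lock);
			return 1;	/* broken links */
		}
		node = node->next;
		pthread_mutex_unlock(&list_lock);	/* lock dropped between entries */
	} while (node != head);

	return 0;
}

/* Writer: swap in a new list head; relink and bump the counter under the lock. */
static void replace_pages(struct page_node *new_head)
{
	pthread_mutex_lock(&list_lock);
	pages = new_head;
	list_cnt++;		/* any in-flight checker will now bail out */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct page_node n[3];
	int i;

	for (i = 0; i < 3; i++) {
		n[i].next = &n[(i + 1) % 3];
		n[i].prev = &n[(i + 2) % 3];
	}
	pages = &n[0];

	printf("check: %d\n", check_pages());		/* 0: consistent */
	replace_pages(&n[1]);
	printf("check after replace: %d\n", check_pages());
	return 0;
}

Build the sketch with something like "cc -pthread" and it prints 0 for both
checks, since the single-threaded main never races with itself; the point is
only to show which operations have to sit inside the same critical section.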
kernel/trace/ring_buffer.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 696d422d5b35..0672df07b599 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6774,6 +6774,7 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 	}
 
 	for_each_buffer_cpu(buffer, cpu) {
+		unsigned long flags;
 
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			continue;
@@ -6800,11 +6801,15 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 						     struct buffer_page, list);
 		list_del_init(&cpu_buffer->reader_page->list);
 
+		/* Synchronize with rb_check_pages() */
+		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 		/* The cpu_buffer pages are a link list with no head */
 		cpu_buffer->pages = cpu_buffer->new_pages.next;
 		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
 		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
 		cpu_buffer->cnt++;
+		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 		/* Clear the new_pages list */
 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
--
2.45.2
On Fri, 11 Oct 2024 11:28:50 -0400 Steven Rostedt <rostedt@goodmis.org> wrote:

> From: Steven Rostedt <rostedt@goodmis.org>
>
> [...]
>
> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

I rejected this patch as I believe the version Petr has is required.

-- Steve