Message-ID: <20240827092746.514052482@goodmis.org>
Date: Tue, 27 Aug 2024 05:27:19 -0400
From: Steven Rostedt
To: linux-kernel@vger.kernel.org
Cc: Masami Hiramatsu, Mark Rutland, Mathieu Desnoyers, Andrew Morton,
 Vincent Donnefort
Subject: [for-next][PATCH 3/8] ring-buffer: Align meta-page to sub-buffers for improved TLB usage
References: <20240827092716.515115830@goodmis.org>

From: Vincent Donnefort

Previously, the mapped ring-buffer layout caused misalignment between the
meta-page and the sub-buffers when the sub-buffer size was not a multiple
of PAGE_SIZE. This prevented hardware that supports larger TLB entries
from making use of them.

Add padding with the zero-page between the meta-page and the sub-buffers.
Also update the ring-buffer map_test selftest to verify that padding.
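As an illustration of the resulting layout, a minimal user-space reader
could consume the mapping as sketched below. This is a sketch only,
mirroring the selftest further down; the tracefs path and the
<linux/trace_mmap.h> uapi header are assumptions of the example, and
error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/trace_mmap.h>

int main(void)
{
	struct trace_buffer_meta *meta;
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned long data_len;
	void *data;
	int fd;

	/* Hypothetical per-CPU file; adjust the CPU and tracefs mount as needed. */
	fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return 1;

	/* The meta-page always starts at offset 0 of the mapping. */
	meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (meta == MAP_FAILED)
		return 1;

	/*
	 * With this patch, meta_page_size equals the sub-buffer size, so
	 * the data mapped below starts on a sub-buffer-aligned offset and
	 * the gap past the meta struct reads back as zero-page padding.
	 */
	data_len = (unsigned long)meta->subbuf_size * meta->nr_subbufs;
	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd,
		    meta->meta_page_size);
	if (data == MAP_FAILED)
		return 1;

	printf("meta_page_size=%u subbuf_size=%u nr_subbufs=%u\n",
	       meta->meta_page_size, meta->subbuf_size, meta->nr_subbufs);

	munmap(data, data_len);
	munmap(meta, page_size);
	close(fd);
	return 0;
}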
Link: https://lore.kernel.org/20240628104611.1443542-1-vdonnefort@google.com

Signed-off-by: Vincent Donnefort
Signed-off-by: Steven Rostedt (Google)
---
 kernel/trace/ring_buffer.c                    | 33 +++++++++++--------
 .../testing/selftests/ring-buffer/map_test.c  | 14 ++++++++
 2 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c3a5e6cbb940..77dc0b25140e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6852,10 +6852,10 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
 	/* install subbuf ID to kern VA translation */
 	cpu_buffer->subbuf_ids = subbuf_ids;
 
-	meta->meta_page_size = PAGE_SIZE;
 	meta->meta_struct_len = sizeof(*meta);
 	meta->nr_subbufs = nr_subbufs;
 	meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+	meta->meta_page_size = meta->subbuf_size;
 
 	rb_update_meta_page(cpu_buffer);
 }
@@ -6949,6 +6949,12 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 	    !(vma->vm_flags & VM_MAYSHARE))
 		return -EPERM;
 
+	subbuf_order = cpu_buffer->buffer->subbuf_order;
+	subbuf_pages = 1 << subbuf_order;
+
+	if (subbuf_order && pgoff % subbuf_pages)
+		return -EINVAL;
+
 	/*
 	 * Make sure the mapping cannot become writable later. Also tell the VM
 	 * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
@@ -6958,11 +6964,8 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 
 	lockdep_assert_held(&cpu_buffer->mapping_lock);
 
-	subbuf_order = cpu_buffer->buffer->subbuf_order;
-	subbuf_pages = 1 << subbuf_order;
-
 	nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
-	nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
+	nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
 
 	nr_vma_pages = vma_pages(vma);
 	if (!nr_vma_pages || nr_vma_pages > nr_pages)
@@ -6975,20 +6978,24 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
 		return -ENOMEM;
 
 	if (!pgoff) {
+		unsigned long meta_page_padding;
+
 		pages[p++] = virt_to_page(cpu_buffer->meta_page);
 
 		/*
-		 * TODO: Align sub-buffers on their size, once
-		 * vm_insert_pages() supports the zero-page.
+		 * Pad with the zero-page to align the meta-page with the
+		 * sub-buffers.
 		 */
-	} else {
-		/* Skip the meta-page */
-		pgoff--;
+		meta_page_padding = subbuf_pages - 1;
+		while (meta_page_padding-- && p < nr_pages) {
+			unsigned long __maybe_unused zero_addr =
+				vma->vm_start + (PAGE_SIZE * p);
 
-		if (pgoff % subbuf_pages) {
-			err = -EINVAL;
-			goto out;
+			pages[p++] = ZERO_PAGE(zero_addr);
 		}
+	} else {
+		/* Skip the meta-page */
+		pgoff -= subbuf_pages;
 
 		s += pgoff / subbuf_pages;
 	}
diff --git a/tools/testing/selftests/ring-buffer/map_test.c b/tools/testing/selftests/ring-buffer/map_test.c
index a9006fa7097e..4bb0192e43f3 100644
--- a/tools/testing/selftests/ring-buffer/map_test.c
+++ b/tools/testing/selftests/ring-buffer/map_test.c
@@ -228,6 +228,20 @@ TEST_F(map, data_mmap)
 	data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
 		    desc->cpu_fd, meta_len);
 	ASSERT_EQ(data, MAP_FAILED);
+
+	/* Verify meta-page padding */
+	if (desc->meta->meta_page_size > getpagesize()) {
+		void *addr;
+
+		data_len = desc->meta->meta_page_size;
+		data = mmap(NULL, data_len,
+			    PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
+		ASSERT_NE(data, MAP_FAILED);
+
+		addr = (void *)((unsigned long)data + getpagesize());
+		ASSERT_EQ(*((int *)addr), 0);
+		munmap(data, data_len);
+	}
 }
 
 FIXTURE(snapshot) {

-- 
2.43.0
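
For reference, the page arithmetic in __rb_map_vma() after this change
works out as in the standalone sketch below. It is not part of the patch;
the 4K page size, subbuf_order of 9 and buffer size are made-up example
values:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096UL	/* assumed 4K pages */

int main(void)
{
	unsigned long subbuf_order = 9;			/* 2MB sub-buffers */
	unsigned long subbuf_pages = 1UL << subbuf_order;
	unsigned long nr_subbufs = 8 + 1;		/* nr_pages + reader-subbuf */
	unsigned long pgoff = 0;			/* mapping from the start */
	/* One full sub-buffer worth of pages now backs the meta-page slot. */
	unsigned long nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff;
	unsigned long padding = subbuf_pages - 1;	/* zero pages after the meta-page */

	printf("meta-page:         page 0\n");
	printf("zero-page padding: pages 1..%lu\n", padding);
	printf("first sub-buffer:  byte offset %lu (%luKB aligned)\n",
	       subbuf_pages * EXAMPLE_PAGE_SIZE,
	       subbuf_pages * EXAMPLE_PAGE_SIZE / 1024);
	printf("total mapping:     %lu pages\n", nr_pages);
	return 0;
}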