tools/testing/selftests/mm/merge.c | 89 ++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+)
Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
an issue in the loop which iterates through VMAs applying mseal, which was
triggered by mseal()'ing a range of VMAs where the second was mseal()'d and
the first mergeable with it, once mseal()'d.
Add a regression test to assert that this behaviour is correct. We place it
in the merge selftests as this is strictly an issue with merging (via a
vma_modify() invocation).
It also asserts that mseal()'d ranges are correctly merged as you'd expect.
The test is implemented such that it is skipped if mseal() is not
available on the system.
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
---
v2:
* Added tools/ based header so __NR_mseal should always be available.
* However, for completeness, also check to see if defined, and assume ENOSYS if
not.
* Thanks to Mike for reporting issues in his build on this test!
v1:
https://lore.kernel.org/all/20260330135011.107036-1-ljs@kernel.org/
tools/testing/selftests/mm/merge.c | 89 ++++++++++++++++++++++++++++++
1 file changed, 89 insertions(+)
diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index 10b686102b79..f73803b3679a 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -2,6 +2,7 @@
#define _GNU_SOURCE
#include "kselftest_harness.h"
+#include <asm-generic/unistd.h>
#include <linux/prctl.h>
#include <fcntl.h>
#include <stdio.h>
@@ -48,6 +49,19 @@ static pid_t do_fork(struct procmap_fd *procmap)
return 0;
}
+#ifdef __NR_mseal
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
+}
+#else
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
FIXTURE_SETUP(merge)
{
self->page_size = psize();
@@ -1217,6 +1231,81 @@ TEST_F(merge, mremap_correct_placed_faulted)
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
+TEST_F(merge, merge_vmas_with_mseal)
+{
+ unsigned int page_size = self->page_size;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+ /* We need our own as cannot munmap() once sealed. */
+ char *carveout;
+
+ /* Invalid mseal() call to see if implemented. */
+ ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
+ if (errno == ENOSYS)
+ SKIP(return, "mseal not supported, skipping.");
+
+ /* Map carveout. */
+ carveout = mmap(NULL, 17 * page_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ ASSERT_NE(carveout, MAP_FAILED);
+
+ /*
+ * Map 3 separate VMAs:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWE | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[page_size * 6], 5 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ptr3 = mmap(&carveout[page_size * 11], 5 * page_size, PROT_READ,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * mseal the second VMA:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ASSERT_EQ(sys_mseal(ptr2, 5 * page_size, 0), 0);
+
+ /* Make first VMA mergeable upon mseal. */
+ ASSERT_EQ(mprotect(ptr, 5 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC), 0);
+ /*
+ * At this point we have:
+ *
+ * |-----------|-----------|-----------|
+ * | RWE | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * Now mseal all of the VMAs.
+ */
+ ASSERT_EQ(sys_mseal(ptr, 15 * page_size, 0), 0);
+
+ /*
+ * We should end up with:
+ *
+ * |-----------------------|-----------|
+ * | RWES | ROS |
+ * |-----------------------|-----------|
+ * ptr ptr3
+ */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
+}
+
TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
{
struct procmap_fd *procmap = &self->procmap;
--
2.53.0
On Tue, Mar 31, 2026 at 08:36:27AM +0100, Lorenzo Stoakes (Oracle) wrote:
> Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
> an issue in the loop which iterates through VMAs applying mseal, which was
> triggered by mseal()'ing a range of VMAs where the second was mseal()'d and
> the first mergeable with it, once mseal()'d.
>
> Add a regression test to assert that this behaviour is correct. We place it
> in the merge selftests as this is strictly an issue with merging (via a
> vma_modify() invocation).
>
> It also assert that mseal()'d ranges are correctly merged as you'd expect.
>
> The test is implemented such that it is skipped if mseal() is not
> available on the system.
>
> Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
> ---
> v2:
> * Added tools/ based header so __NR_mseal should always be available.
> * However, for completeness, also check to see if defined, and assume ENOSYS if
> not.
> * Thanks to Mike for reporting issues in his build on this test!
>
> v1:
> https://lore.kernel.org/all/20260330135011.107036-1-ljs@kernel.org/
>
> tools/testing/selftests/mm/merge.c | 89 ++++++++++++++++++++++++++++++
> 1 file changed, 89 insertions(+)
>
> diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
> index 10b686102b79..f73803b3679a 100644
> --- a/tools/testing/selftests/mm/merge.c
> +++ b/tools/testing/selftests/mm/merge.c
> @@ -2,6 +2,7 @@
>
> #define _GNU_SOURCE
> #include "kselftest_harness.h"
> +#include <asm-generic/unistd.h>
> #include <linux/prctl.h>
> #include <fcntl.h>
> #include <stdio.h>
> @@ -48,6 +49,19 @@ static pid_t do_fork(struct procmap_fd *procmap)
> return 0;
> }
>
> +#ifdef __NR_mseal
> +static int sys_mseal(void *ptr, size_t len, unsigned long flags)
> +{
> + return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
> +}
> +#else
> +static int sys_mseal(void *ptr, size_t len, unsigned long flags)
> +{
> + errno = ENOSYS;
> + return -1;
> +}
> +#endif
> +
> FIXTURE_SETUP(merge)
> {
> self->page_size = psize();
> @@ -1217,6 +1231,81 @@ TEST_F(merge, mremap_correct_placed_faulted)
> ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
> }
>
> +TEST_F(merge, merge_vmas_with_mseal)
> +{
> + unsigned int page_size = self->page_size;
> + struct procmap_fd *procmap = &self->procmap;
> + char *ptr, *ptr2, *ptr3;
> + /* We need our own as cannot munmap() once sealed. */
> + char *carveout;
> +
> + /* Invalid mseal() call to see if implemented. */
> + ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
> + if (errno == ENOSYS)
> + SKIP(return, "mseal not supported, skipping.");
> +
> + /* Map carveout. */
> + carveout = mmap(NULL, 17 * page_size, PROT_NONE,
> + MAP_PRIVATE | MAP_ANON, -1, 0);
I think it would be simpler if we did 5 pages (1 for each VMA, plus the two guard VMAs around it).
> + ASSERT_NE(carveout, MAP_FAILED);
> +
> + /*
> + * Map 3 separate VMAs:
> + *
> + * |-----------|-----------|-----------|
> + * | RW | RWE | RO |
> + * |-----------|-----------|-----------|
> + * ptr ptr2 ptr3
> + */
And now carveout += page_size; would leave us with
mmap(&carveout[0 * page_size])
mmap(&carveout[1 * page_size])
mmap(&carveout[2 * page_size])
and we don't need to deal with the arbitrary 5 pages below, which looks a good bit nicer.
> + ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr, MAP_FAILED);
> + ptr2 = mmap(&carveout[page_size * 6], 5 * page_size,
> + PROT_READ | PROT_WRITE | PROT_EXEC,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr2, MAP_FAILED);
> + ptr3 = mmap(&carveout[page_size * 11], 5 * page_size, PROT_READ,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr3, MAP_FAILED);
> +
> + /*
> + * mseal the second VMA:
> + *
> + * |-----------|-----------|-----------|
> + * | RW | RWES | RO |
> + * |-----------|-----------|-----------|
> + * ptr ptr2 ptr3
> + */
> + ASSERT_EQ(sys_mseal(ptr2, 5 * page_size, 0), 0);
> +
> + /* Make first VMA mergeable upon mseal. */
> + ASSERT_EQ(mprotect(ptr, 5 * page_size,
> + PROT_READ | PROT_WRITE | PROT_EXEC), 0);
> + /*
> + * At this point we have:
> + *
> + * |-----------|-----------|-----------|
> + * | RWE | RWES | RO |
> + * |-----------|-----------|-----------|
> + * ptr ptr2 ptr3
> + *
> + * Now mseal all of the VMAs.
> + */
> + ASSERT_EQ(sys_mseal(ptr, 15 * page_size, 0), 0);
> +
> + /*
> + * We should end up with:
> + *
> + * |-----------------------|-----------|
> + * | RWES | ROS |
> + * |-----------------------|-----------|
> + * ptr ptr3
> + */
> + ASSERT_TRUE(find_vma_procmap(procmap, ptr));
> + ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
> + ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
> +}
Apart from these nits, the test LGTM
Acked-by: Pedro Falcato <pfalcato@suse.de>
--
Pedro
Then again, and contradicting myself elsewhere on fix-patches blah blah blah,
since this is just test code, I checked the below and it works the same and
triggers the regression.

So maybe we can just apply that?

Cheers, Lorenzo

----8<----
From bcf5c7ea63961756913110694cfd34173e3f0fc4 Mon Sep 17 00:00:00 2001
From: "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>
Date: Tue, 31 Mar 2026 11:01:35 +0100
Subject: [PATCH] fix

Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
---
 tools/testing/selftests/mm/merge.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index f73803b3679a..efcb100fd865 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -1245,7 +1245,7 @@ TEST_F(merge, merge_vmas_with_mseal)
 		SKIP(return, "mseal not supported, skipping.");
 
 	/* Map carveout. */
-	carveout = mmap(NULL, 17 * page_size, PROT_NONE,
+	carveout = mmap(NULL, 5 * page_size, PROT_NONE,
 			MAP_PRIVATE | MAP_ANON, -1, 0);
 	ASSERT_NE(carveout, MAP_FAILED);
 
@@ -1257,14 +1257,14 @@ TEST_F(merge, merge_vmas_with_mseal)
 	 * |-----------|-----------|-----------|
 	 *  ptr         ptr2        ptr3
 	 */
-	ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
+	ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
 		   MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
 	ASSERT_NE(ptr, MAP_FAILED);
-	ptr2 = mmap(&carveout[page_size * 6], 5 * page_size,
+	ptr2 = mmap(&carveout[2 * page_size], page_size,
 		    PROT_READ | PROT_WRITE | PROT_EXEC,
 		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
 	ASSERT_NE(ptr2, MAP_FAILED);
-	ptr3 = mmap(&carveout[page_size * 11], 5 * page_size, PROT_READ,
+	ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
 		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
 	ASSERT_NE(ptr3, MAP_FAILED);
 
@@ -1276,10 +1276,10 @@ TEST_F(merge, merge_vmas_with_mseal)
 	 * |-----------|-----------|-----------|
 	 *  ptr         ptr2        ptr3
 	 */
-	ASSERT_EQ(sys_mseal(ptr2, 5 * page_size, 0), 0);
+	ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
 
 	/* Make first VMA mergeable upon mseal. */
-	ASSERT_EQ(mprotect(ptr, 5 * page_size,
+	ASSERT_EQ(mprotect(ptr, page_size,
 			   PROT_READ | PROT_WRITE | PROT_EXEC), 0);
 	/*
 	 * At this point we have:
@@ -1291,7 +1291,7 @@ TEST_F(merge, merge_vmas_with_mseal)
 	 *
 	 * Now mseal all of the VMAs.
 	 */
-	ASSERT_EQ(sys_mseal(ptr, 15 * page_size, 0), 0);
+	ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
 
 	/*
 	 * We should end up with:
@@ -1303,7 +1303,7 @@ TEST_F(merge, merge_vmas_with_mseal)
 	 */
 	ASSERT_TRUE(find_vma_procmap(procmap, ptr));
 	ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
-	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
+	ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
 }
 
 TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
-- 
2.53.0
On Tue, 31 Mar 2026 11:08:03 +0100 "Lorenzo Stoakes (Oracle)" <ljs@kernel.org> wrote:
> Then again, and contradicting myself elsewhere on fix-patches blah blah blah,
> since this is just test code, I checked the below and it works the same and
> triggers the regression.
>
> So maybe we can just apply that?
Here's how the two folded together will look:
From: "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>
Subject: tools/testing/selftests: add merge test for partial msealed range
Date: Tue, 31 Mar 2026 08:36:27 +0100
Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
an issue in the loop which iterates through VMAs applying mseal, which was
triggered by mseal()'ing a range of VMAs where the second was mseal()'d
and the first mergeable with it, once mseal()'d.
Add a regression test to assert that this behaviour is correct. We place
it in the merge selftests as this is strictly an issue with merging (via a
vma_modify() invocation).
It also asserts that mseal()'d ranges are correctly merged as you'd
expect.
The test is implemented such that it is skipped if mseal() is not
available on the system.
[ljs@kernel.org: simplifications per Pedro]
Link: https://lkml.kernel.org/r/1c9c922d-5cb5-4cff-9273-b737cdb57ca1@lucifer.local
Link: https://lkml.kernel.org/r/20260331073627.50010-1-ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <ljs@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
tools/testing/selftests/mm/merge.c | 89 +++++++++++++++++++++++++++
1 file changed, 89 insertions(+)
--- a/tools/testing/selftests/mm/merge.c~tools-testing-selftests-add-merge-test-for-partial-msealed-range
+++ a/tools/testing/selftests/mm/merge.c
@@ -2,6 +2,7 @@
#define _GNU_SOURCE
#include "kselftest_harness.h"
+#include <asm-generic/unistd.h>
#include <linux/prctl.h>
#include <fcntl.h>
#include <stdio.h>
@@ -48,6 +49,19 @@ static pid_t do_fork(struct procmap_fd *
return 0;
}
+#ifdef __NR_mseal
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
+}
+#else
+static int sys_mseal(void *ptr, size_t len, unsigned long flags)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
FIXTURE_SETUP(merge)
{
self->page_size = psize();
@@ -1217,6 +1231,81 @@ TEST_F(merge, mremap_correct_placed_faul
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
}
+TEST_F(merge, merge_vmas_with_mseal)
+{
+ unsigned int page_size = self->page_size;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2, *ptr3;
+ /* We need our own as cannot munmap() once sealed. */
+ char *carveout;
+
+ /* Invalid mseal() call to see if implemented. */
+ ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
+ if (errno == ENOSYS)
+ SKIP(return, "mseal not supported, skipping.");
+
+ /* Map carveout. */
+ carveout = mmap(NULL, 5 * page_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ ASSERT_NE(carveout, MAP_FAILED);
+
+ /*
+ * Map 3 separate VMAs:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWE | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr3, MAP_FAILED);
+
+ /*
+ * mseal the second VMA:
+ *
+ * |-----------|-----------|-----------|
+ * | RW | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ */
+ ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
+
+ /* Make first VMA mergeable upon mseal. */
+ ASSERT_EQ(mprotect(ptr, page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC), 0);
+ /*
+ * At this point we have:
+ *
+ * |-----------|-----------|-----------|
+ * | RWE | RWES | RO |
+ * |-----------|-----------|-----------|
+ * ptr ptr2 ptr3
+ *
+ * Now mseal all of the VMAs.
+ */
+ ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
+
+ /*
+ * We should end up with:
+ *
+ * |-----------------------|-----------|
+ * | RWES | ROS |
+ * |-----------------------|-----------|
+ * ptr ptr3
+ */
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+}
+
TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
{
struct procmap_fd *procmap = &self->procmap;
_
On Tue, Mar 31, 2026 at 05:58:51PM -0700, Andrew Morton wrote:
> On Tue, 31 Mar 2026 11:08:03 +0100 "Lorenzo Stoakes (Oracle)" <ljs@kernel.org> wrote:
>
> > Then again, and contradicting myself elsewhere on fix-patches blah blah blah,
> > since this is just test code, I checked the below and it works the same and
> > triggers the regression.
> >
> > So maybe we can just apply that?
>
> Here's how the two folded together will look:
Thanks, LGTM!
Cheers, Lorenzo
>
>
> From: "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>
> Subject: tools/testing/selftests: add merge test for partial msealed range
> Date: Tue, 31 Mar 2026 08:36:27 +0100
>
> Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
> an issue in the loop which iterates through VMAs applying mseal, which was
> triggered by mseal()'ing a range of VMAs where the second was mseal()'d
> and the first mergeable with it, once mseal()'d.
>
> Add a regression test to assert that this behaviour is correct. We place
> it in the merge selftests as this is strictly an issue with merging (via a
> vma_modify() invocation).
>
> It also asserts that mseal()'d ranges are correctly merged as you'd
> expect.
>
> The test is implemented such that it is skipped if mseal() is not
> available on the system.
>
> [ljs@kernel.org: simplifications per Pedro]
> Link: https://lkml.kernel.org/r/1c9c922d-5cb5-4cff-9273-b737cdb57ca1@lucifer.local
> Link: https://lkml.kernel.org/r/20260331073627.50010-1-ljs@kernel.org
> Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
> Cc: David Hildenbrand <david@kernel.org>
> Cc: Jann Horn <jannh@google.com>
> Cc: Liam Howlett <liam.howlett@oracle.com>
> Cc: Lorenzo Stoakes <ljs@kernel.org>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Mike Rapoport <rppt@kernel.org>
> Cc: Pedro Falcato <pfalcato@suse.de>
> Cc: Shuah Khan <shuah@kernel.org>
> Cc: Suren Baghdasaryan <surenb@google.com>
> Cc: Vlastimil Babka <vbabka@kernel.org>
> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
> ---
>
> tools/testing/selftests/mm/merge.c | 89 +++++++++++++++++++++++++++
> 1 file changed, 89 insertions(+)
>
> --- a/tools/testing/selftests/mm/merge.c~tools-testing-selftests-add-merge-test-for-partial-msealed-range
> +++ a/tools/testing/selftests/mm/merge.c
> @@ -2,6 +2,7 @@
>
> #define _GNU_SOURCE
> #include "kselftest_harness.h"
> +#include <asm-generic/unistd.h>
> #include <linux/prctl.h>
> #include <fcntl.h>
> #include <stdio.h>
> @@ -48,6 +49,19 @@ static pid_t do_fork(struct procmap_fd *
> return 0;
> }
>
> +#ifdef __NR_mseal
> +static int sys_mseal(void *ptr, size_t len, unsigned long flags)
> +{
> + return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
> +}
> +#else
> +static int sys_mseal(void *ptr, size_t len, unsigned long flags)
> +{
> + errno = ENOSYS;
> + return -1;
> +}
> +#endif
> +
> FIXTURE_SETUP(merge)
> {
> self->page_size = psize();
> @@ -1217,6 +1231,81 @@ TEST_F(merge, mremap_correct_placed_faul
> ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
> }
>
> +TEST_F(merge, merge_vmas_with_mseal)
> +{
> + unsigned int page_size = self->page_size;
> + struct procmap_fd *procmap = &self->procmap;
> + char *ptr, *ptr2, *ptr3;
> + /* We need our own as cannot munmap() once sealed. */
> + char *carveout;
> +
> + /* Invalid mseal() call to see if implemented. */
> + ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
> + if (errno == ENOSYS)
> + SKIP(return, "mseal not supported, skipping.");
> +
> + /* Map carveout. */
> + carveout = mmap(NULL, 5 * page_size, PROT_NONE,
> + MAP_PRIVATE | MAP_ANON, -1, 0);
> + ASSERT_NE(carveout, MAP_FAILED);
> +
> + /*
> + * Map 3 separate VMAs:
> + *
> + * |-----------|-----------|-----------|
> + * | RW | RWE | RO |
> + * |-----------|-----------|-----------|
> + * ptr ptr2 ptr3
> + */
> + ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr, MAP_FAILED);
> + ptr2 = mmap(&carveout[2 * page_size], page_size,
> + PROT_READ | PROT_WRITE | PROT_EXEC,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr2, MAP_FAILED);
> + ptr3 = mmap(&carveout[3 * page_size], page_size, PROT_READ,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr3, MAP_FAILED);
> +
> + /*
> + * mseal the second VMA:
> + *
> + * |-----------|-----------|-----------|
> + * | RW | RWES | RO |
> + * |-----------|-----------|-----------|
> + * ptr ptr2 ptr3
> + */
> + ASSERT_EQ(sys_mseal(ptr2, page_size, 0), 0);
> +
> + /* Make first VMA mergeable upon mseal. */
> + ASSERT_EQ(mprotect(ptr, page_size,
> + PROT_READ | PROT_WRITE | PROT_EXEC), 0);
> + /*
> + * At this point we have:
> + *
> + * |-----------|-----------|-----------|
> + * | RWE | RWES | RO |
> + * |-----------|-----------|-----------|
> + * ptr ptr2 ptr3
> + *
> + * Now mseal all of the VMAs.
> + */
> + ASSERT_EQ(sys_mseal(ptr, 3 * page_size, 0), 0);
> +
> + /*
> + * We should end up with:
> + *
> + * |-----------------------|-----------|
> + * | RWES | ROS |
> + * |-----------------------|-----------|
> + * ptr ptr3
> + */
> + ASSERT_TRUE(find_vma_procmap(procmap, ptr));
> + ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
> + ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
> +}
> +
> TEST_F(merge_with_fork, mremap_faulted_to_unfaulted_prev)
> {
> struct procmap_fd *procmap = &self->procmap;
> _
>
On Tue, Mar 31, 2026 at 11:08:03AM +0100, Lorenzo Stoakes (Oracle) wrote:
> Then again, and contradicting myself elsewhere on fix-patches blah blah blah,
> since this is just test code, I checked the below and it works the same and
> triggers the regression.

Thanks for taking a look :)

>
> So maybe we can just apply that?

If you also like it, SGTM. I was going to say you didn't need to (because
it's test code, etc) but you were faster than me anyway :D

--
Pedro
On Tue, Mar 31, 2026 at 10:35:33AM +0100, Pedro Falcato wrote:
> On Tue, Mar 31, 2026 at 08:36:27AM +0100, Lorenzo Stoakes (Oracle) wrote:
> > Commit 2697dd8ae721 ("mm/mseal: update VMA end correctly on merge") fixed
> > an issue in the loop which iterates through VMAs applying mseal, which was
> > triggered by mseal()'ing a range of VMAs where the second was mseal()'d and
> > the first mergeable with it, once mseal()'d.
> >
> > Add a regression test to assert that this behaviour is correct. We place it
> > in the merge selftests as this is strictly an issue with merging (via a
> > vma_modify() invocation).
> >
> > It also assert that mseal()'d ranges are correctly merged as you'd expect.
> >
> > The test is implemented such that it is skipped if mseal() is not
> > available on the system.
> >
> > Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
> > ---
> > v2:
> > * Added tools/ based header so __NR_mseal should always be available.
> > * However, for completeness, also check to see if defined, and assume ENOSYS if
> > not.
> > * Thanks to Mike for reporting issues in his build on this test!
> >
> > v1:
> > https://lore.kernel.org/all/20260330135011.107036-1-ljs@kernel.org/
> >
> > tools/testing/selftests/mm/merge.c | 89 ++++++++++++++++++++++++++++++
> > 1 file changed, 89 insertions(+)
> >
> > diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
> > index 10b686102b79..f73803b3679a 100644
> > --- a/tools/testing/selftests/mm/merge.c
> > +++ b/tools/testing/selftests/mm/merge.c
> > @@ -2,6 +2,7 @@
> >
> > #define _GNU_SOURCE
> > #include "kselftest_harness.h"
> > +#include <asm-generic/unistd.h>
> > #include <linux/prctl.h>
> > #include <fcntl.h>
> > #include <stdio.h>
> > @@ -48,6 +49,19 @@ static pid_t do_fork(struct procmap_fd *procmap)
> > return 0;
> > }
> >
> > +#ifdef __NR_mseal
> > +static int sys_mseal(void *ptr, size_t len, unsigned long flags)
> > +{
> > + return syscall(__NR_mseal, (unsigned long)ptr, len, flags);
> > +}
> > +#else
> > +static int sys_mseal(void *ptr, size_t len, unsigned long flags)
> > +{
> > + errno = ENOSYS;
> > + return -1;
> > +}
> > +#endif
> > +
> > FIXTURE_SETUP(merge)
> > {
> > self->page_size = psize();
> > @@ -1217,6 +1231,81 @@ TEST_F(merge, mremap_correct_placed_faulted)
> > ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 15 * page_size);
> > }
> >
> > +TEST_F(merge, merge_vmas_with_mseal)
> > +{
> > + unsigned int page_size = self->page_size;
> > + struct procmap_fd *procmap = &self->procmap;
> > + char *ptr, *ptr2, *ptr3;
> > + /* We need our own as cannot munmap() once sealed. */
> > + char *carveout;
> > +
> > + /* Invalid mseal() call to see if implemented. */
> > + ASSERT_EQ(sys_mseal(NULL, 0, ~0UL), -1);
> > + if (errno == ENOSYS)
> > + SKIP(return, "mseal not supported, skipping.");
> > +
> > + /* Map carveout. */
> > + carveout = mmap(NULL, 17 * page_size, PROT_NONE,
> > + MAP_PRIVATE | MAP_ANON, -1, 0);
>
> I think it would be simpler if we did 5 pages (1 for each VMA, plus the two guard VMAs around it).
>
> > + ASSERT_NE(carveout, MAP_FAILED);
> > +
> > + /*
> > + * Map 3 separate VMAs:
> > + *
> > + * |-----------|-----------|-----------|
> > + * | RW | RWE | RO |
> > + * |-----------|-----------|-----------|
> > + * ptr ptr2 ptr3
> > + */
>
> And now carveout += page_size; would leave us with
>
> mmap(&carveout[0 * page_size])
> mmap(&carveout[1 * page_size])
> mmap(&carveout[2 * page_size])
>
> and we don't need to deal with the arbitrary 5 pages below, which looks a good bit nicer.
OK I mean I'm not sure I should be sending a v3 so fast and it's too much for a
fix-patch.
Given it's cosmetic do we really care?
>
> > + ptr = mmap(&carveout[page_size], 5 * page_size, PROT_READ | PROT_WRITE,
> > + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> > + ASSERT_NE(ptr, MAP_FAILED);
> > + ptr2 = mmap(&carveout[page_size * 6], 5 * page_size,
> > + PROT_READ | PROT_WRITE | PROT_EXEC,
> > + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> > + ASSERT_NE(ptr2, MAP_FAILED);
> > + ptr3 = mmap(&carveout[page_size * 11], 5 * page_size, PROT_READ,
> > + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> > + ASSERT_NE(ptr3, MAP_FAILED);
> > +
> > + /*
> > + * mseal the second VMA:
> > + *
> > + * |-----------|-----------|-----------|
> > + * | RW | RWES | RO |
> > + * |-----------|-----------|-----------|
> > + * ptr ptr2 ptr3
> > + */
> > + ASSERT_EQ(sys_mseal(ptr2, 5 * page_size, 0), 0);
> > +
> > + /* Make first VMA mergeable upon mseal. */
> > + ASSERT_EQ(mprotect(ptr, 5 * page_size,
> > + PROT_READ | PROT_WRITE | PROT_EXEC), 0);
> > + /*
> > + * At this point we have:
> > + *
> > + * |-----------|-----------|-----------|
> > + * | RWE | RWES | RO |
> > + * |-----------|-----------|-----------|
> > + * ptr ptr2 ptr3
> > + *
> > + * Now mseal all of the VMAs.
> > + */
> > + ASSERT_EQ(sys_mseal(ptr, 15 * page_size, 0), 0);
> > +
> > + /*
> > + * We should end up with:
> > + *
> > + * |-----------------------|-----------|
> > + * | RWES | ROS |
> > + * |-----------------------|-----------|
> > + * ptr ptr3
> > + */
> > + ASSERT_TRUE(find_vma_procmap(procmap, ptr));
> > + ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
> > + ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 10 * page_size);
> > +}
>
> Apart from these nits, the test LGTM
>
> Acked-by: Pedro Falcato <pfalcato@suse.de>
Thanks.
>
> --
> Pedro
© 2016 - 2026 Red Hat, Inc.