Add test to assert that we have now allowed merging of VMAs when KSM
merging-by-default has been set by prctl(PR_SET_MEMORY_MERGE, ...).
We simply perform a trivial mapping of adjacent VMAs expecting a merge;
however, prior to the recent changes that apply this mode earlier in the
mapping process, these merges would not have succeeded.
Assert that we have fixed this!
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Tested-by: Chengming Zhou <chengming.zhou@linux.dev>
---
tools/testing/selftests/mm/merge.c | 78 ++++++++++++++++++++++++++++++
1 file changed, 78 insertions(+)
diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
index c76646cdf6e6..2380a5a6a529 100644
--- a/tools/testing/selftests/mm/merge.c
+++ b/tools/testing/selftests/mm/merge.c
@@ -2,10 +2,12 @@
#define _GNU_SOURCE
#include "../kselftest_harness.h"
+#include <linux/prctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
+#include <sys/prctl.h>
#include <sys/wait.h>
#include "vm_util.h"
@@ -31,6 +33,11 @@ FIXTURE_TEARDOWN(merge)
{
ASSERT_EQ(munmap(self->carveout, 12 * self->page_size), 0);
ASSERT_EQ(close_procmap(&self->procmap), 0);
+ /*
+ * Clear unconditionally, as some tests set this. It is no issue if this
+ * fails (KSM may be disabled for instance).
+ */
+ prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
}
TEST_F(merge, mprotect_unfaulted_left)
@@ -452,4 +459,75 @@ TEST_F(merge, forked_source_vma)
ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
}
+TEST_F(merge, ksm_merge)
+{
+ unsigned int page_size = self->page_size;
+ char *carveout = self->carveout;
+ struct procmap_fd *procmap = &self->procmap;
+ char *ptr, *ptr2;
+ int err;
+
+ /*
+	 * Map two R/W VMAs immediately adjacent to one another; they should
+ * trivially merge:
+ *
+ * |-----------|-----------|
+ * | R/W | R/W |
+ * |-----------|-----------|
+ * ptr ptr2
+ */
+
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+
+ /* Unmap the second half of this merged VMA. */
+ ASSERT_EQ(munmap(ptr2, page_size), 0);
+
+ /* OK, now enable global KSM merge. We clear this on test teardown. */
+ err = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
+ if (err == -1) {
+ int errnum = errno;
+
+		/* EINVAL is the only error we tolerate here... */
+ ASSERT_EQ(errnum, EINVAL);
+ /* ...but indicates we should skip. */
+ SKIP(return, "KSM memory merging not supported, skipping.");
+ }
+
+ /*
+	 * Now map a VMA adjacent to the existing one that was just made
+ * VM_MERGEABLE, this should merge as well.
+ */
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+
+	/* Now unmap this VMA altogether. */
+ ASSERT_EQ(munmap(ptr, 2 * page_size), 0);
+
+ /* Try the same operation as before, asserting this also merges fine. */
+ ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ ptr2 = mmap(&carveout[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ ASSERT_NE(ptr2, MAP_FAILED);
+ ASSERT_TRUE(find_vma_procmap(procmap, ptr));
+ ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
+ ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
+}
+
TEST_HARNESS_MAIN
--
2.49.0
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250521 14:20]:
> Add test to assert that we have now allowed merging of VMAs when KSM
> merging-by-default has been set by prctl(PR_SET_MEMORY_MERGE, ...).
>
> We simply perform a trivial mapping of adjacent VMAs expecting a merge,
> however prior to recent changes implementing this mode earlier than before,
> these merges would not have succeeded.
>
> Assert that we have fixed this!
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
> Tested-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> ---
> tools/testing/selftests/mm/merge.c | 78 ++++++++++++++++++++++++++++++
> 1 file changed, 78 insertions(+)
>
> diff --git a/tools/testing/selftests/mm/merge.c b/tools/testing/selftests/mm/merge.c
> index c76646cdf6e6..2380a5a6a529 100644
> --- a/tools/testing/selftests/mm/merge.c
> +++ b/tools/testing/selftests/mm/merge.c
> @@ -2,10 +2,12 @@
>
> #define _GNU_SOURCE
> #include "../kselftest_harness.h"
> +#include <linux/prctl.h>
> #include <stdio.h>
> #include <stdlib.h>
> #include <unistd.h>
> #include <sys/mman.h>
> +#include <sys/prctl.h>
> #include <sys/wait.h>
> #include "vm_util.h"
>
> @@ -31,6 +33,11 @@ FIXTURE_TEARDOWN(merge)
> {
> ASSERT_EQ(munmap(self->carveout, 12 * self->page_size), 0);
> ASSERT_EQ(close_procmap(&self->procmap), 0);
> + /*
> + * Clear unconditionally, as some tests set this. It is no issue if this
> + * fails (KSM may be disabled for instance).
> + */
> + prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);
> }
>
> TEST_F(merge, mprotect_unfaulted_left)
> @@ -452,4 +459,75 @@ TEST_F(merge, forked_source_vma)
> ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr2 + 5 * page_size);
> }
>
> +TEST_F(merge, ksm_merge)
> +{
> + unsigned int page_size = self->page_size;
> + char *carveout = self->carveout;
> + struct procmap_fd *procmap = &self->procmap;
> + char *ptr, *ptr2;
> + int err;
> +
> + /*
> + * Map two R/W immediately adjacent to one another, they should
> + * trivially merge:
> + *
> + * |-----------|-----------|
> + * | R/W | R/W |
> + * |-----------|-----------|
> + * ptr ptr2
> + */
> +
> + ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr, MAP_FAILED);
> + ptr2 = mmap(&carveout[2 * page_size], page_size,
> + PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr2, MAP_FAILED);
> + ASSERT_TRUE(find_vma_procmap(procmap, ptr));
> + ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
> + ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
> +
> + /* Unmap the second half of this merged VMA. */
> + ASSERT_EQ(munmap(ptr2, page_size), 0);
> +
> + /* OK, now enable global KSM merge. We clear this on test teardown. */
> + err = prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);
> + if (err == -1) {
> + int errnum = errno;
> +
> + /* Only non-failure case... */
> + ASSERT_EQ(errnum, EINVAL);
> + /* ...but indicates we should skip. */
> + SKIP(return, "KSM memory merging not supported, skipping.");
> + }
> +
> + /*
> + * Now map a VMA adjacent to the existing that was just made
> + * VM_MERGEABLE, this should merge as well.
> + */
> + ptr2 = mmap(&carveout[2 * page_size], page_size,
> + PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr2, MAP_FAILED);
> + ASSERT_TRUE(find_vma_procmap(procmap, ptr));
> + ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
> + ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
> +
> + /* Now this VMA altogether. */
> + ASSERT_EQ(munmap(ptr, 2 * page_size), 0);
> +
> + /* Try the same operation as before, asserting this also merges fine. */
> + ptr = mmap(&carveout[page_size], page_size, PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr, MAP_FAILED);
> + ptr2 = mmap(&carveout[2 * page_size], page_size,
> + PROT_READ | PROT_WRITE,
> + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
> + ASSERT_NE(ptr2, MAP_FAILED);
> + ASSERT_TRUE(find_vma_procmap(procmap, ptr));
> + ASSERT_EQ(procmap->query.vma_start, (unsigned long)ptr);
> + ASSERT_EQ(procmap->query.vma_end, (unsigned long)ptr + 2 * page_size);
> +}
> +
> TEST_HARNESS_MAIN
> --
> 2.49.0
>
© 2016 - 2025 Red Hat, Inc.