[PATCH v2 2/2] selftests: futex: Add tests for robust release operations

André Almeida posted 2 patches 1 week ago
[PATCH v2 2/2] selftests: futex: Add tests for robust release operations
Posted by André Almeida 1 week ago
Add tests for __vdso_futex_robust_listXX_try_unlock() and for the futex()
op FUTEX_ROBUST_UNLOCK.

Test the contended and uncontended cases for the vDSO functions and all
ops combinations for FUTEX_ROBUST_UNLOCK.

Signed-off-by: André Almeida <andrealmeid@igalia.com>
---
Changes from v1:
 - Add test variants for FUTEX_ROBUST_LIST32
 - Skip 64 bit tests for 32 bit builds
---
 .../selftests/futex/functional/robust_list.c       | 247 +++++++++++++++++++++
 tools/testing/selftests/futex/include/futextest.h  |   6 +
 2 files changed, 253 insertions(+)

diff --git a/tools/testing/selftests/futex/functional/robust_list.c b/tools/testing/selftests/futex/functional/robust_list.c
index e7d1254e18ca..c2e4e47d7cc6 100644
--- a/tools/testing/selftests/futex/functional/robust_list.c
+++ b/tools/testing/selftests/futex/functional/robust_list.c
@@ -27,12 +27,15 @@
 #include "futextest.h"
 #include "../../kselftest_harness.h"
 
+#include <dlfcn.h>
 #include <errno.h>
 #include <pthread.h>
 #include <signal.h>
+#include <stdint.h>
 #include <stdatomic.h>
 #include <stdbool.h>
 #include <stddef.h>
+#include <sys/auxv.h>
 #include <sys/mman.h>
 #include <sys/wait.h>
 
@@ -42,6 +45,10 @@
 
 #define SLEEP_US 100
 
+#if UINTPTR_MAX == 0xffffffffffffffff
+# define BUILD_64
+#endif
+
 static pthread_barrier_t barrier, barrier2;
 
 static int set_robust_list(struct robust_list_head *head, size_t len)
@@ -54,6 +61,12 @@ static int get_robust_list(int pid, struct robust_list_head **head, size_t *len_
 	return syscall(SYS_get_robust_list, pid, head, len_ptr);
 }
 
+static int sys_futex_robust_unlock(_Atomic(uint32_t) *uaddr, unsigned int op, int val,
+				   void *list_op_pending, unsigned int val3)
+{
+	return syscall(SYS_futex, uaddr, op, val, NULL, list_op_pending, val3, 0);
+}
+
 /*
  * Basic lock struct, contains just the futex word and the robust list element
  * Real implementations have also a *prev to easily walk in the list
@@ -549,4 +562,238 @@ TEST(test_circular_list)
 		ksft_test_result_pass("%s\n", __func__);
 }
 
+/*
+ * Below are tests for the fix of the robust release race condition. Please read the following
+ * thread to learn more about the issue in the first place and why the following functions fix it:
+ * https://lore.kernel.org/lkml/20260316162316.356674433@kernel.org/
+ */
+
+/*
+ * Auxiliary code for loading the vDSO functions
+ */
+#define VDSO_SIZE 0x4000
+
+void *get_vdso_func_addr(const char *str)
+{
+	void *vdso_base = (void *) getauxval(AT_SYSINFO_EHDR), *addr;
+	Dl_info info;
+
+	if (!vdso_base) {
+		perror("Error to get AT_SYSINFO_EHDR");
+		return NULL;
+	}
+
+	for (addr = vdso_base; addr < vdso_base + VDSO_SIZE; addr += sizeof(addr)) {
+		if (dladdr(addr, &info) == 0 || !info.dli_sname)
+			continue;
+
+		if (!strcmp(info.dli_sname, str))
+			return info.dli_saddr;
+	}
+
+	return NULL;
+}
+
+/*
+ * These are the real vDSO function signatures:
+ *
+ *	__vdso_futex_robust_list64_try_unlock(__u32 *lock, __u32 tid, __u64 *pop)
+ *	__vdso_futex_robust_list32_try_unlock(__u32 *lock, __u32 tid, __u32 *pop)
+ *
+ * So for the generic entry point we need to use a void pointer as the last argument
+ */
+FIXTURE(vdso_unlock)
+{
+	uint32_t (*vdso)(_Atomic(uint32_t) *lock, uint32_t tid, void *pop);
+};
+
+FIXTURE_VARIANT(vdso_unlock)
+{
+	bool is_32;
+	char func_name[];
+};
+
+FIXTURE_SETUP(vdso_unlock)
+{
+	self->vdso = get_vdso_func_addr(variant->func_name);
+}
+
+FIXTURE_TEARDOWN(vdso_unlock) {}
+
+FIXTURE_VARIANT_ADD(vdso_unlock, 32)
+{
+	.func_name = "__vdso_futex_robust_list32_try_unlock",
+	.is_32 = true,
+};
+
+FIXTURE_VARIANT_ADD(vdso_unlock, 64)
+{
+	.func_name = "__vdso_futex_robust_list64_try_unlock",
+	.is_32 = false,
+};
+
+/*
+ * Test the vDSO robust_listXX_try_unlock() for the uncontended case. The virtual syscall should
+ * return the thread ID of the lock owner, the lock word must be 0 and the list_op_pending should
+ * be NULL.
+ */
+TEST_F(vdso_unlock, test_robust_try_unlock_uncontended)
+{
+	struct lock_struct lock = { .futex = 0 };
+	_Atomic(unsigned int) *futex = &lock.futex;
+	struct robust_list_head head;
+	uintptr_t exp = (uintptr_t) NULL;
+	pid_t tid = gettid();
+	int ret;
+
+	if (!self->vdso) {
+		ksft_test_result_skip("%s not found\n", variant->func_name);
+		return;
+	}
+
+	*futex = tid;
+
+	ret = set_list(&head);
+	if (ret)
+		ksft_test_result_fail("set_robust_list error\n");
+
+	head.list_op_pending = &lock.list;
+
+	ret = self->vdso(futex, tid, &head.list_op_pending);
+
+	ASSERT_EQ(ret, tid);
+	ASSERT_EQ(*futex, 0);
+
+	/* The 32-bit entry point clears only the lower 32 bits of list_op_pending; the upper bits must be preserved */
+	if (variant->is_32) {
+		exp = (uintptr_t)(unsigned long)&lock.list;
+		exp &= ~0xFFFFFFFFULL;
+	}
+
+	ASSERT_EQ((uintptr_t)(unsigned long)head.list_op_pending, exp);
+}
+
+/*
+ * If the lock is contended, the operation fails. The return value is the value found at the
+ * futex word (tid | FUTEX_WAITERS), the futex word is not modified and the list_op_pending is
+ * not cleared.
+ */
+TEST_F(vdso_unlock, test_robust_try_unlock_contended)
+{
+	struct lock_struct lock = { .futex = 0 };
+	_Atomic(unsigned int) *futex = &lock.futex;
+	struct robust_list_head head;
+	pid_t tid = gettid();
+	int ret;
+
+	if (!self->vdso) {
+		ksft_test_result_skip("%s not found\n", variant->func_name);
+		return;
+	}
+
+	*futex = tid | FUTEX_WAITERS;
+
+	ret = set_list(&head);
+	if (ret)
+		ksft_test_result_fail("set_robust_list error\n");
+
+	head.list_op_pending = &lock.list;
+
+	ret = self->vdso(futex, tid, &head.list_op_pending);
+
+	ASSERT_EQ(ret, tid | FUTEX_WAITERS);
+	ASSERT_EQ(*futex, tid | FUTEX_WAITERS);
+	ASSERT_EQ(head.list_op_pending, &lock.list);
+}
+
+FIXTURE(futex_op) {};
+
+FIXTURE_VARIANT(futex_op)
+{
+	unsigned int op;
+	unsigned int val3;
+};
+
+FIXTURE_SETUP(futex_op) {}
+
+FIXTURE_TEARDOWN(futex_op) {}
+
+FIXTURE_VARIANT_ADD(futex_op, wake)
+{
+	.op = FUTEX_WAKE,
+	.val3 = 0,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, wake_bitset)
+{
+	.op = FUTEX_WAKE_BITSET,
+	.val3 = FUTEX_BITSET_MATCH_ANY,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, unlock_pi)
+{
+	.op = FUTEX_UNLOCK_PI,
+	.val3 = 0,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, wake32)
+{
+	.op = FUTEX_WAKE | FUTEX_ROBUST_LIST32,
+	.val3 = 0,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, wake_bitset32)
+{
+	.op = FUTEX_WAKE_BITSET | FUTEX_ROBUST_LIST32,
+	.val3 = FUTEX_BITSET_MATCH_ANY,
+};
+
+FIXTURE_VARIANT_ADD(futex_op, unlock_pi32)
+{
+	.op = FUTEX_UNLOCK_PI | FUTEX_ROBUST_LIST32,
+	.val3 = 0,
+};
+
+/*
+ * The syscall should return the number of tasks woken (for this test, 0), clear the futex word and
+ * clear list_op_pending
+ */
+TEST_F(futex_op, test_futex_robust_unlock)
+{
+	struct lock_struct lock = { .futex = 0 };
+	_Atomic(unsigned int) *futex = &lock.futex;
+	uintptr_t exp = (uintptr_t) NULL;
+	struct robust_list_head head;
+	pid_t tid = gettid();
+	int ret;
+
+#ifndef BUILD_64
+	if (!(variant->op & FUTEX_ROBUST_LIST32)) {
+		ksft_test_result_skip("Not supported for 32 bit build\n");
+		return;
+	}
+#endif
+
+	*futex = tid | FUTEX_WAITERS;
+
+	ret = set_list(&head);
+	if (ret)
+		ksft_test_result_fail("set_robust_list error\n");
+
+	head.list_op_pending = &lock.list;
+
+	ret = sys_futex_robust_unlock(futex, FUTEX_ROBUST_UNLOCK | variant->op, tid,
+				      &head.list_op_pending, variant->val3);
+
+	ASSERT_EQ(ret, 0);
+	ASSERT_EQ(*futex, 0);
+
+	if (variant->op & FUTEX_ROBUST_LIST32) {
+		exp = (uint64_t)(unsigned long)&lock.list;
+		exp &= ~0xFFFFFFFFULL;
+	}
+
+	ASSERT_EQ((uintptr_t)(unsigned long)head.list_op_pending, exp);
+}
+
 TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index 3d48e9789d9f..df33f31d6994 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -38,6 +38,12 @@ typedef volatile u_int32_t futex_t;
 #ifndef FUTEX_CMP_REQUEUE_PI
 #define FUTEX_CMP_REQUEUE_PI		12
 #endif
+#ifndef FUTEX_ROBUST_UNLOCK
+#define FUTEX_ROBUST_UNLOCK		512
+#endif
+#ifndef FUTEX_ROBUST_LIST32
+#define FUTEX_ROBUST_LIST32		1024
+#endif
 #ifndef FUTEX_WAIT_REQUEUE_PI_PRIVATE
 #define FUTEX_WAIT_REQUEUE_PI_PRIVATE	(FUTEX_WAIT_REQUEUE_PI | \
 					 FUTEX_PRIVATE_FLAG)

-- 
2.53.0

Re: [PATCH v2 2/2] selftests: futex: Add tests for robust release operations
Posted by Thomas Weißschuh 1 week ago
On 2026-03-29 18:39:28-0300, André Almeida wrote:
> Add tests for __vdso_futex_robust_listXX_try_unlock() and for the futex()
> op FUTEX_ROBUST_UNLOCK.
> 
> Test the contended and uncontended cases for the vDSO functions and all
> ops combinations for FUTEX_ROBUST_UNLOCK.
> 
> Signed-off-by: André Almeida <andrealmeid@igalia.com>
> ---
> Change from v2:
>  - Add test variants for FUTEX_ROBUST_LIST32
>  - Skip 64 bit tests for 32 bit builds
> ---
>  .../selftests/futex/functional/robust_list.c       | 247 +++++++++++++++++++++
>  tools/testing/selftests/futex/include/futextest.h  |   6 +
>  2 files changed, 253 insertions(+)
> 
> diff --git a/tools/testing/selftests/futex/functional/robust_list.c b/tools/testing/selftests/futex/functional/robust_list.c
> index e7d1254e18ca..c2e4e47d7cc6 100644
> --- a/tools/testing/selftests/futex/functional/robust_list.c
> +++ b/tools/testing/selftests/futex/functional/robust_list.c
> @@ -27,12 +27,15 @@
>  #include "futextest.h"
>  #include "../../kselftest_harness.h"
>  
> +#include <dlfcn.h>
>  #include <errno.h>
>  #include <pthread.h>
>  #include <signal.h>
> +#include <stdint.h>
>  #include <stdatomic.h>
>  #include <stdbool.h>
>  #include <stddef.h>
> +#include <sys/auxv.h>
>  #include <sys/mman.h>
>  #include <sys/wait.h>
>  
> @@ -42,6 +45,10 @@
>  
>  #define SLEEP_US 100
>  
> +#if UINTPTR_MAX == 0xffffffffffffffff
> +# define BUILD_64
> +#endif

#if __BITS_PER_LONG == 64

would be easier.

> +
>  static pthread_barrier_t barrier, barrier2;
>  
>  static int set_robust_list(struct robust_list_head *head, size_t len)
> @@ -54,6 +61,12 @@ static int get_robust_list(int pid, struct robust_list_head **head, size_t *len_
>  	return syscall(SYS_get_robust_list, pid, head, len_ptr);
>  }
>  
> +static int sys_futex_robust_unlock(_Atomic(uint32_t) *uaddr, unsigned int op, int val,
> +				   void *list_op_pending, unsigned int val3)
> +{
> +	return syscall(SYS_futex, uaddr, op, val, NULL, list_op_pending, val3, 0);
> +}
> +
>  /*
>   * Basic lock struct, contains just the futex word and the robust list element
>   * Real implementations have also a *prev to easily walk in the list
> @@ -549,4 +562,238 @@ TEST(test_circular_list)
>  		ksft_test_result_pass("%s\n", __func__);
>  }
>  
> +/*
> + * Bellow are tests for the fix of robust release race condition. Please read the following
> + * thread to learn more about the issue in the first place and why the following functions fix it:
> + * https://lore.kernel.org/lkml/20260316162316.356674433@kernel.org/
> + */
> +
> +/*
> + * Auxiliary code for loading the vDSO functions
> + */
> +#define VDSO_SIZE 0x4000

There is no guarantee that the vDSO has any certain size.

> +void *get_vdso_func_addr(const char *str)
> +{
> +	void *vdso_base = (void *) getauxval(AT_SYSINFO_EHDR), *addr;
> +	Dl_info info;
> +
> +	if (!vdso_base) {
> +		perror("Error to get AT_SYSINFO_EHDR");

getauxval() does not set errno, so perror() doesn't do much.

> +		return NULL;
> +	}
> +
> +	for (addr = vdso_base; addr < vdso_base + VDSO_SIZE; addr += sizeof(addr)) {
> +		if (dladdr(addr, &info) == 0 || !info.dli_sname)
> +			continue;
> +
> +		if (!strcmp(info.dli_sname, str))
> +			return info.dli_saddr;
> +	}

That's a weird new way to look up vDSO symbols.
We have tools/testing/selftests/vDSO/parse_vdso.c to do that properly.
Or look at tools/testing/selftests/x86/vdso_restorer.c for another
non-standard way, but at least it is somewhat consistent.

> +
> +	return NULL;
> +}
> +
> +/*
> + * These are the real vDSO function signatures:
> + *
> + *	__vdso_futex_robust_list64_try_unlock(__u32 *lock, __u32 tid, __u64 *pop)
> + *	__vdso_futex_robust_list32_try_unlock(__u32 *lock, __u32 tid, __u32 *pop)
> + *
> + * So for the generic entry point we need to use a void pointer as the last argument
> + */

We probably should have UAPI headers with the vDSO prototypes.

(...)

> +/*
> + * Test the vDSO robust_listXX_try_unlock() for the uncontended case. The virtual syscall should
> + * return the thread ID of the lock owner, the lock word must be 0 and the list_op_pending should
> + * be NULL.
> + */
> +TEST_F(vdso_unlock, test_robust_try_unlock_uncontended)
> +{
> +	struct lock_struct lock = { .futex = 0 };
> +	_Atomic(unsigned int) *futex = &lock.futex;
> +	struct robust_list_head head;
> +	uintptr_t exp = (uintptr_t) NULL;
> +	pid_t tid = gettid();
> +	int ret;
> +
> +	if (!self->vdso) {
> +		ksft_test_result_skip("%s not found\n", variant->func_name);

ksft_test_result_skip() does not really work in a harness test.
Use SKIP() instead.

> +		return;
> +	}
> +
> +	*futex = tid;
> +
> +	ret = set_list(&head);
> +	if (ret)
> +		ksft_test_result_fail("set_robust_list error\n");

ksft_test_result_fail() does not work properly here either.

> +
> +	head.list_op_pending = &lock.list;
> +
> +	ret = self->vdso(futex, tid, &head.list_op_pending);
> +
> +	ASSERT_EQ(ret, tid);
> +	ASSERT_EQ(*futex, 0);
> +
> +	/* Check only the lower 32 bits for the 32-bit entry point */
> +	if (variant->is_32) {
> +		exp = (uintptr_t)(unsigned long)&lock.list;

The cast through 'unsigned long' looks unnecessary.

> +		exp &= ~0xFFFFFFFFULL;
> +	}
> +
> +	ASSERT_EQ((uintptr_t)(unsigned long)head.list_op_pending, exp);
> +}

(...)

Note: The futex selftests seem to fail on musl libc.


Thomas