Add a test to the v_ptrace test suite to verify that the vector CSRs
are clobbered on syscalls.
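
The child configures a known vector state with vsetvli (e16, m2) and
stops at an ebreak. The tracer reads the NT_RISCV_VECTOR regset and
checks the initial vtype, vl, vlenb, vstart and vcsr values, then
advances the child past the ebreak so that it issues a syscall before
stopping at the next ebreak. Reading the regset again, the tracer
expects the clobbered vector state to be reported with vill set in
vtype and vl cleared to zero.
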
Signed-off-by: Sergey Matyukevich <geomatsi@gmail.com>
---
.../riscv/vector/validate_v_ptrace.c | 140 ++++++++++++++++++
1 file changed, 140 insertions(+)
diff --git a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c
index a8d64d351edd..2dd0c727e520 100644
--- a/tools/testing/selftests/riscv/vector/validate_v_ptrace.c
+++ b/tools/testing/selftests/riscv/vector/validate_v_ptrace.c
@@ -212,4 +212,144 @@ TEST(ptrace_v_early_debug)
}
}
+TEST(ptrace_v_syscall_clobbering)
+{
+ pid_t pid;
+
+ if (!is_vector_supported() && !is_xtheadvector_supported())
+ SKIP(return, "Vector not supported");
+
+ chld_lock = 1;
+ pid = fork();
+ ASSERT_LE(0, pid)
+ TH_LOG("fork: %m");
+
+ if (pid == 0) {
+ unsigned long vl;
+
+ while (chld_lock == 1)
+ asm volatile("" : : "g"(chld_lock) : "memory");
+
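+ /*
+ * Program a known vector configuration (e16, m2) so that the
+ * tracer can check vtype and vl before and after the syscall.
+ */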
+ if (is_xtheadvector_supported()) {
+ asm volatile (
+ // 0 | zimm[10:0] | rs1 | 1 1 1 | rd |1010111| vsetvli
+ // vsetvli t4, x0, e16, m2, d1
+ ".4byte 0b00000000010100000111111011010111\n"
+ "mv %[new_vl], t4\n"
+ : [new_vl] "=r" (vl) : : "t4");
+ } else {
+ asm volatile (
+ ".option push\n"
+ ".option arch, +zve32x\n"
+ "vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
+ ".option pop\n"
+ : [new_vl] "=r"(vl) : : );
+ }
+
+ while (1) {
+ asm volatile (
+ ".option push\n"
+ ".option norvc\n"
+ "ebreak\n"
+ ".option pop\n");
+
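+ /*
+ * The sleep() call below enters the kernel via a syscall,
+ * which is expected to clobber the tracee vector CSR state.
+ */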
+ sleep(0);
+ }
+ } else {
+ struct __riscv_v_regset_state *regset_data;
+ unsigned long vlenb = get_vr_len();
+ struct user_regs_struct regs;
+ size_t regset_size;
+ struct iovec iov;
+ int status;
+
+ /* attach */
+
+ ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
+ ASSERT_EQ(pid, waitpid(pid, &status, 0));
+ ASSERT_TRUE(WIFSTOPPED(status));
+
+ /* unlock */
+
+ ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
+
+ /* resume and wait for the 1st ebreak */
+
+ ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
+ ASSERT_EQ(pid, waitpid(pid, &status, 0));
+ ASSERT_TRUE(WIFSTOPPED(status));
+
+ /* read tracee vector csr regs using ptrace GETREGSET */
+
+ regset_size = sizeof(*regset_data) + vlenb * 32;
+ regset_data = calloc(1, regset_size);
+
+ iov.iov_base = regset_data;
+ iov.iov_len = regset_size;
+
+ ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
+
+ /* verify initial vsetvli settings */
+
+ if (is_xtheadvector_supported()) {
+ EXPECT_EQ(5UL, regset_data->vtype);
+ } else {
+ EXPECT_EQ(9UL, regset_data->vtype);
+ }
+
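+ /*
+ * With e16 and m2, vl is set to VLMAX = 2 * VLEN / 16,
+ * which is equal to vlenb (VLEN / 8).
+ */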
+ EXPECT_EQ(regset_data->vlenb, regset_data->vl);
+ EXPECT_EQ(vlenb, regset_data->vlenb);
+ EXPECT_EQ(0UL, regset_data->vstart);
+ EXPECT_EQ(0UL, regset_data->vcsr);
+
+ /* skip the 1st ebreak (4 bytes due to norvc), then resume and wait for the 2nd */
+
+ iov.iov_base = &regs;
+ iov.iov_len = sizeof(regs);
+
+ ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov));
+ regs.pc += 4;
+ ASSERT_EQ(0, ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov));
+
+ ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
+ ASSERT_EQ(pid, waitpid(pid, &status, 0));
+ ASSERT_TRUE(WIFSTOPPED(status));
+
+ /* read tracee vector csr regs again using ptrace GETREGSET */
+
+ iov.iov_base = regset_data;
+ iov.iov_len = regset_size;
+
+ ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
+
+ /* verify that V state is illegal after syscall */
+
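+ /*
+ * vill occupies the MSB of vtype: the clobbered vector state
+ * is reported with vill set and vl cleared to zero.
+ */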
+ EXPECT_EQ((1UL << (__riscv_xlen - 1)), regset_data->vtype);
+ EXPECT_EQ(vlenb, regset_data->vlenb);
+ EXPECT_EQ(0UL, regset_data->vstart);
+ EXPECT_EQ(0UL, regset_data->vcsr);
+ EXPECT_EQ(0UL, regset_data->vl);
+
+ /* cleanup */
+
+ ASSERT_EQ(0, kill(pid, SIGKILL));
+ }
+}
+
TEST_HARNESS_MAIN
--
2.52.0