Add a test to v_ptrace test suite to verify that ptrace rejects the
invalid input combinations of vector csr registers. Use kselftest
fixture variants to create multiple invalid inputs for the test.

Signed-off-by: Sergey Matyukevich <[email protected]>
---
 .../testing/selftests/riscv/vector/v_ptrace.c | 238 ++++++++++++++++++
 1 file changed, 238 insertions(+)

diff --git a/tools/testing/selftests/riscv/vector/v_ptrace.c b/tools/testing/selftests/riscv/vector/v_ptrace.c
index 9fea29f7b686..023e1faa84bf 100644
--- a/tools/testing/selftests/riscv/vector/v_ptrace.c
+++ b/tools/testing/selftests/riscv/vector/v_ptrace.c
@@ -183,4 +183,242 @@ TEST(ptrace_v_early_debug)
        }
 }
 
+FIXTURE(v_csr_invalid)
+{
+};
+
+FIXTURE_SETUP(v_csr_invalid)
+{
+}
+
+FIXTURE_TEARDOWN(v_csr_invalid)
+{
+}
+
+/* modifications of the initial vsetvli settings */
+FIXTURE_VARIANT(v_csr_invalid)
+{
+       unsigned long vstart;
+       unsigned long vl;
+       unsigned long vtype;
+       unsigned long vcsr;
+       unsigned long vlenb_mul;
+       unsigned long vlenb_min;
+       unsigned long vlenb_max;
+};
+
+/* unexpected vlenb value */
+FIXTURE_VARIANT_ADD(v_csr_invalid, new_vlenb)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = 0x3,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x2,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x0,
+};
+
+/* invalid reserved bits in vcsr */
+FIXTURE_VARIANT_ADD(v_csr_invalid, vcsr_invalid_reserved_bits)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = 0x3,
+       .vcsr = 0x1UL << 8,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x0,
+};
+
+/* invalid reserved bits in vtype */
+FIXTURE_VARIANT_ADD(v_csr_invalid, vtype_invalid_reserved_bits)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = (0x1UL << 8) | 0x3,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x0,
+};
+
+/* set vill bit */
+FIXTURE_VARIANT_ADD(v_csr_invalid, invalid_vill_bit)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = (0x1UL << (__riscv_xlen - 1)) | 0x3,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x0,
+};
+
+/* reserved vsew value: vsew > 3 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vsew)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = 0x4UL << 3,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x0,
+};
+
+/* reserved vlmul value: vlmul == 4 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vlmul)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = 0x4,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x0,
+};
+
+/* invalid fractional LMUL for VLEN <= 256: LMUL= 1/8, SEW = 64 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, frac_lmul1)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = 0x1d,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x20,
+};
+
+/* invalid integral LMUL for VLEN <= 16: LMUL= 2, SEW = 64 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul1)
+{
+       .vstart = 0x0,
+       .vl = 0x0,
+       .vtype = 0x19,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x2,
+};
+
+/* invalid VL for VLEN <= 128: LMUL= 2, SEW = 64, VL = 8 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, vl1)
+{
+       .vstart = 0x0,
+       .vl = 0x8,
+       .vtype = 0x19,
+       .vcsr = 0x0,
+       .vlenb_mul = 0x1,
+       .vlenb_min = 0x0,
+       .vlenb_max = 0x10,
+};
+
+TEST_F(v_csr_invalid, ptrace_v_invalid_values)
+{
+       unsigned long vlenb;
+       pid_t pid;
+
+       if (!is_vector_supported())
+               SKIP(return, "Vector not supported");
+
+       asm volatile("csrr %[vlenb], vlenb" : [vlenb] "=r"(vlenb));
+       if (variant->vlenb_min) {
+               if (vlenb < variant->vlenb_min)
+                       SKIP(return, "This test does not support VLEN < %lu\n",
+                            variant->vlenb_min * 8);
+       }
+       if (variant->vlenb_max) {
+               if (vlenb > variant->vlenb_max)
+                       SKIP(return, "This test does not support VLEN > %lu\n",
+                            variant->vlenb_max * 8);
+       }
+
+       chld_lock = 1;
+       pid = fork();
+       ASSERT_LE(0, pid)
+               TH_LOG("fork: %m");
+
+       if (pid == 0) {
+               unsigned long vl;
+
+               while (chld_lock == 1)
+                       asm volatile("" : : "g"(chld_lock) : "memory");
+
+               asm(".option arch, +zve32x\n");
+               asm(".option arch, +c\n");
+
+               asm volatile("vsetvli %[new_vl], x0, e16, m2, tu, mu\n"
+                            : [new_vl] "=r"(vl)
+                            :
+                            :);
+
+               while (1) {
+                       asm volatile("c.ebreak");
+                       asm volatile("c.nop");
+               }
+       } else {
+               struct __riscv_v_regset_state *regset_data;
+               size_t regset_size;
+               struct iovec iov;
+               int status;
+               int ret;
+
+               /* attach */
+
+               ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
+               ASSERT_EQ(pid, waitpid(pid, &status, 0));
+               ASSERT_TRUE(WIFSTOPPED(status));
+
+               /* unlock */
+
+               ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
+
+               /* resume and wait for the 1st c.ebreak */
+
+               ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
+               ASSERT_EQ(pid, waitpid(pid, &status, 0));
+               ASSERT_TRUE(WIFSTOPPED(status));
+
+               /* read tracee vector csr regs using ptrace GETREGSET */
+
+               regset_size = sizeof(*regset_data) + vlenb * 32;
+               regset_data = calloc(1, regset_size);
+               ASSERT_NE(NULL, regset_data);
+
+               iov.iov_base = regset_data;
+               iov.iov_len = regset_size;
+
+               ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
+
+               /* verify initial vsetvli settings */
+
+               EXPECT_EQ(regset_data->vlenb, regset_data->vl);
+               EXPECT_EQ(9UL, regset_data->vtype);
+               EXPECT_EQ(vlenb, regset_data->vlenb);
+               EXPECT_EQ(0UL, regset_data->vstart);
+               EXPECT_EQ(0UL, regset_data->vcsr);
+
+               /* apply invalid settings from fixture variants */
+
+               regset_data->vlenb *= variant->vlenb_mul;
+               regset_data->vstart = variant->vstart;
+               regset_data->vtype = variant->vtype;
+               regset_data->vcsr = variant->vcsr;
+               regset_data->vl = variant->vl;
+
+               iov.iov_base = regset_data;
+               iov.iov_len = regset_size;
+
+               errno = 0;
+               ret = ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov);
+               ASSERT_EQ(errno, EINVAL);
+               ASSERT_EQ(ret, -1);
+
+               /* cleanup */
+
+               free(regset_data);
+               ASSERT_EQ(0, kill(pid, SIGKILL));
+       }
+}
+
 TEST_HARNESS_MAIN
-- 
2.51.0


Reply via email to