Add strict validation for vector CSRs when setting them via ptrace:
- reject attempts to set reserved bits or invalid field combinations
- enforce strict VL checks against the calculated VLMAX values
Vector specs 0.7.1 and 1.0 allow normal applications to request a
candidate VL value and read back the hardware-adjusted result; see
section 6 for details. Disallow such flexibility in vector ptrace
operations and strictly enforce valid VL input instead.
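For example, on a hart with VLEN=128, an application requesting
AVL=100 with SEW=32 and LMUL=2 gets VLMAX = LMUL * VLEN / SEW =
2 * 128 / 32 = 8, and the hardware clamps the returned VL to 8. Under
the strict checks below, a ptrace write of VL=100 with that same vtype
is rejected with -EINVAL rather than silently adjusted.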
The traced process may not update its saved vector context if no vector
instructions execute between breakpoints. The purpose of the strict
ptrace checks is therefore to make sure that debuggers maintain an
accurate view of the tracee's vector context across multiple
halt/resume debug cycles.
Signed-off-by: Sergey Matyukevich <geomatsi@gmail.com>
---
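Below the scissors line, not for the commit message: a rough user-space
sketch of the new failure mode. It assumes VLEN <= 2048 when sizing the
regset buffer and omits most error handling; NT_RISCV_VECTOR and
struct __riscv_v_regset_state come from the kernel UAPI headers.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>
#include <linux/elf.h>		/* NT_RISCV_VECTOR */
#include <asm/ptrace.h>		/* struct __riscv_v_regset_state */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* child: stop so the parent can trace and inspect it */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(pid, NULL, 0);

	/* header plus 32 vector registers of up to 256 bytes (VLEN <= 2048) */
	size_t size = sizeof(struct __riscv_v_regset_state) + 32 * 256;
	struct __riscv_v_regset_state *st = calloc(1, size);
	struct iovec iov = { .iov_base = st, .iov_len = size };

	/* read back a valid vector context first */
	ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov);

	/* request a VL no VLMAX can satisfy: expect EINVAL with this patch */
	st->vl = ~0UL;
	if (ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov) < 0)
		printf("SETREGSET rejected as expected: %s\n", strerror(errno));

	kill(pid, SIGKILL);
	return 0;
}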
arch/riscv/kernel/ptrace.c | 88 +++++++++++++++++++++++++++++++++++++-
1 file changed, 87 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 9d203fb84f5e..5d18fe241697 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -124,6 +124,92 @@ static int riscv_vr_get(struct task_struct *target,
return membuf_write(&to, vstate->datap, riscv_v_vsize);
}
+static int invalid_ptrace_v_csr(struct __riscv_v_ext_state *vstate,
+ struct __riscv_v_regset_state *ptrace)
+{
+ unsigned long vsew, vlmul, vfrac, vl;
+ unsigned long elen, vlen;
+ unsigned long sew, lmul;
+ unsigned long reserved;
+
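+ /* vlenb is the vector register length in bytes; VLEN is in bits */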
+ vlen = vstate->vlenb * 8;
+ if (vstate->vlenb != ptrace->vlenb)
+ return 1;
+
+ /* do not allow setting reserved vcsr bits outside the vxsat/vxrm fields */
+ reserved = ~(CSR_VXSAT_MASK | (CSR_VXRM_MASK << CSR_VXRM_SHIFT));
+ if (ptrace->vcsr & reserved)
+ return 1;
+
+ if (has_vector()) {
+ /* do not allow setting vtype reserved bits or the vill bit */
+ reserved = ~(VTYPE_VSEW | VTYPE_VLMUL | VTYPE_VMA | VTYPE_VTA);
+ if (ptrace->vtype & reserved)
+ return 1;
+
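+ /* SEW may not exceed ELEN: 64 with Zve64x, otherwise 32 */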
+ elen = riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE64X) ? 64 : 32;
+ vsew = (ptrace->vtype & VTYPE_VSEW) >> VTYPE_VSEW_SHIFT;
+ sew = 8 << vsew;
+
+ if (sew > elen)
+ return 1;
+
+ vfrac = (ptrace->vtype & VTYPE_VLMUL_FRAC);
+ vlmul = (ptrace->vtype & VTYPE_VLMUL);
+
+ /* RVV 1.0 spec 3.4.2: vlmul encoding 4 (0b100) is reserved */
+ if (vlmul == 4)
+ return 1;
+
+ /*
+  * RVV 1.0 spec 3.4.2: LMUL < SEW_min / ELEN is reserved; with
+  * ELEN = 32 this rules out LMUL = 1/8 (vlmul == 5)
+  */
+ if (vlmul == 5 && elen == 32)
+ return 1;
+
+ /* for vl == 0, still require that at least one element fits */
+ vl = ptrace->vl ? ptrace->vl : 1;
+
+ if (vfrac) {
+ /* fractional LMUL: lmul = 1/LMUL, so VL <= VLMAX = VLEN / SEW / lmul */
+ lmul = 2 << (3 - (vlmul - vfrac));
+ /* compare against VLMAX so a huge user-supplied vl cannot overflow */
+ if (vl > vlen / sew / lmul)
+ return 1;
+ } else {
+ /* integer LMUL: VL <= VLMAX = LMUL * VLEN / SEW */
+ lmul = 1 << vlmul;
+ if (vl > lmul * vlen / sew)
+ return 1;
+ }
+ }
+
+ if (has_xtheadvector()) {
+ /* do not allow setting vtype reserved bits or the vill bit */
+ reserved = ~(VTYPE_VSEW_THEAD | VTYPE_VLMUL_THEAD | VTYPE_VEDIV_THEAD);
+ if (ptrace->vtype & reserved)
+ return 1;
+
+ /*
+ * THead ISA Extension spec chapter 16:
+ * divided element extension ('Zvediv') is not part of XTheadVector
+ */
+ if (ptrace->vtype & VTYPE_VEDIV_THEAD)
+ return 1;
+
+ vsew = (ptrace->vtype & VTYPE_VSEW_THEAD) >> VTYPE_VSEW_THEAD_SHIFT;
+ sew = 8 << vsew;
+
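+ /* XTheadVector (RVV 0.7.1) has no fractional LMUL encodings */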
+ vlmul = (ptrace->vtype & VTYPE_VLMUL_THEAD);
+ lmul = 1 << vlmul;
+
+ /* for vl == 0, still require that at least one element fits */
+ vl = ptrace->vl ? ptrace->vl : 1;
+
+ /* compare against VLMAX so a huge user-supplied vl cannot overflow */
+ if (vl > lmul * vlen / sew)
+ return 1;
+ }
+
+ return 0;
+}
+
static int riscv_vr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -145,7 +231,7 @@ static int riscv_vr_set(struct task_struct *target,
if (unlikely(ret))
return ret;
- if (vstate->vlenb != ptrace_vstate.vlenb)
+ if (invalid_ptrace_v_csr(vstate, &ptrace_vstate))
return -EINVAL;
vstate->vstart = ptrace_vstate.vstart;
--
2.52.0