When ptrace updates vector CSR registers of a traced process, the
changes may not be visible to subsequent ptrace operations because of
vector context switch optimizations.
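
The scenario can be exercised from user space with a plain regset
round-trip across two ptrace stops. The sketch below is illustrative
only (not part of this patch) and assumes the NT_RISCV_VECTOR regset
and a buffer large enough for the current vlenb:

  #include <stdint.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/uio.h>
  #include <sys/wait.h>
  #include <linux/elf.h>

  /* Called with the tracee stopped at the first breakpoint. */
  static void vcsr_roundtrip(pid_t pid)
  {
          uint8_t buf[4096];   /* illustrative; real size depends on vlenb */
          struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
          int status;

          /* Read current vector state: vstart/vl/vtype/vcsr header + regs. */
          ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov);

          /* ... modify CSR fields in buf ... */

          ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov);

          /* Run to the second breakpoint; the tracee executes no vector
           * instructions in between, so mstatus.VS stays 'clean'. */
          ptrace(PTRACE_CONT, pid, 0, 0);
          waitpid(pid, &status, 0);

          /* Without this fix the values read here may not match the
           * PTRACE_SETREGSET above. */
          ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov);
  }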

The function riscv_v_vstate_save() saves the vector context only if
mstatus.VS is 'dirty'. However, mstatus.VS of the traced process may
remain 'clean' between two breakpoints if no vector instructions were
executed in between. In that case the vector context is not saved at
the second breakpoint, so the second ptrace request may read stale
vector CSR values.
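
For reference, the save path in arch/riscv/include/asm/vector.h is
gated roughly as follows (paraphrased sketch, not part of this patch):

  static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
                                         struct pt_regs *regs)
  {
          /* Save only if the V unit was used since the last save/restore,
           * i.e. the saved mstatus.VS field reads 'dirty'. */
          if ((regs->status & SR_VS) == SR_VS_DIRTY) {
                  __riscv_v_vstate_save(vstate, vstate->datap);
                  __riscv_v_vstate_clean(regs);
          }
  }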

Fix this by introducing a TIF flag that forces a vector context save
on the next context switch, regardless of the mstatus.VS state. Set
this flag in the ptrace operations that modify vector CSR registers.
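
The 'force' reuses the existing dirty marking: when the flag is set,
__switch_to_vector() marks the saved mstatus.VS as dirty before the
regular save runs, along the lines of this helper (paraphrased sketch,
not part of this diff):

  static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
  {
          /* Make the next riscv_v_vstate_save() take its save path. */
          regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
  }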

Signed-off-by: Sergey Matyukevich <[email protected]>
---
 arch/riscv/include/asm/thread_info.h | 2 ++
 arch/riscv/include/asm/vector.h      | 3 +++
 arch/riscv/kernel/process.c          | 2 ++
 arch/riscv/kernel/ptrace.c           | 5 +++++
 4 files changed, 12 insertions(+)

diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 836d80dd2921..e05e9aa89c43 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -118,7 +118,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
 #define TIF_32BIT                      16      /* compat-mode 32bit process */
 #define TIF_RISCV_V_DEFER_RESTORE      17      /* restore Vector before returing to user */
+#define TIF_RISCV_V_FORCE_SAVE         13      /* force Vector context save */
 
 #define _TIF_RISCV_V_DEFER_RESTORE     BIT(TIF_RISCV_V_DEFER_RESTORE)
+#define _TIF_RISCV_V_FORCE_SAVE                BIT(TIF_RISCV_V_FORCE_SAVE)
 
 #endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index b61786d43c20..d3770e13da93 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -370,6 +370,9 @@ static inline void __switch_to_vector(struct task_struct *prev,
 {
        struct pt_regs *regs;
 
+       if (test_and_clear_tsk_thread_flag(prev, TIF_RISCV_V_FORCE_SAVE))
+               __riscv_v_vstate_dirty(task_pt_regs(prev));
+
        if (riscv_preempt_v_started(prev)) {
                if (riscv_v_is_on()) {
                        WARN_ON(prev->thread.riscv_v_flags & RISCV_V_CTX_DEPTH_MASK);
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 31a392993cb4..47959c55cefb 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -183,6 +183,7 @@ void flush_thread(void)
        kfree(current->thread.vstate.datap);
        memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
        clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
+       clear_tsk_thread_flag(current, TIF_RISCV_V_FORCE_SAVE);
 #endif
 #ifdef CONFIG_RISCV_ISA_SUPM
        if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
@@ -205,6 +206,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
        memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
        clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
+       clear_tsk_thread_flag(dst, TIF_RISCV_V_FORCE_SAVE);
 
        return 0;
 }
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 906cf1197edc..569f756bef23 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -148,6 +148,11 @@ static int riscv_vr_set(struct task_struct *target,
        if (vstate->vlenb != ptrace_vstate.vlenb)
                return -EINVAL;
 
+       if (vstate->vtype != ptrace_vstate.vtype ||
+           vstate->vcsr != ptrace_vstate.vcsr ||
+           vstate->vl != ptrace_vstate.vl)
+               set_tsk_thread_flag(target, TIF_RISCV_V_FORCE_SAVE);
+
        vstate->vstart = ptrace_vstate.vstart;
        vstate->vl = ptrace_vstate.vl;
        vstate->vtype = ptrace_vstate.vtype;
-- 
2.51.0

