[PATCH] sparc: Fix stack corruption

2023-09-20 Thread Sebastian Huber
Fix a potential stack corruption in uniprocessor configurations during
the start of multitasking.

The system initialization uses the interrupt stack.  A first level
interrupt shall never interrupt a context which uses the interrupt
stack.  Such a use would lead to stack corruption and undefined system
behaviour.  Unfortunately, in uniprocessor configurations this was the
case.  Multitasking is started using _CPU_Context_restore().  The
caller of this function (_Thread_Start_multitasking()) uses the
interrupt stack.  Later we have in cpukit/score/cpu/sparc/cpu_asm.S:

mov %g1, %psr ! restore status register and
  !  ENABLE TRAPS 

ld  [%o1 + G5_OFFSET], %g5! restore the global registers
ld  [%o1 + G7_OFFSET], %g7

! Load thread specific ISR dispatch prevention flag
ld  [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
! Store it to memory later to use the cycles

ldd [%o1 + L0_OFFSET], %l0! restore the local registers
ldd [%o1 + L2_OFFSET], %l2
ldd [%o1 + L4_OFFSET], %l4
ldd [%o1 + L6_OFFSET], %l6

! Now restore thread specific ISR dispatch prevention flag
st  %o2, [%g6 + PER_CPU_ISR_DISPATCH_DISABLE]

ldd [%o1 + I0_OFFSET], %i0! restore the input registers
ldd [%o1 + I2_OFFSET], %i2
ldd [%o1 + I4_OFFSET], %i4
ldd [%o1 + I6_FP_OFFSET], %i6

ldd [%o1 + O6_SP_OFFSET], %o6 ! restore the output registers

Between the ENABLE TRAPS and the restore of the output registers, we
still use the stack of the caller and interrupts may be enabled.  If an
interrupt happens in this code block, the interrupt stack is
concurrently used which may lead to a crash.

Fix this by adding a new function _SPARC_Start_multitasking() for
uniprocessor configurations.  This function first sets the stack pointer
to use the stack of the heir thread.

Close #4955.
---
 cpukit/score/cpu/sparc/cpu_asm.S  | 29 ++-
 .../score/cpu/sparc/include/rtems/score/cpu.h | 19 
 2 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/cpukit/score/cpu/sparc/cpu_asm.S b/cpukit/score/cpu/sparc/cpu_asm.S
index 287c2c4cd9..fd7186b499 100644
--- a/cpukit/score/cpu/sparc/cpu_asm.S
+++ b/cpukit/score/cpu/sparc/cpu_asm.S
@@ -246,6 +246,14 @@ done_flushing:
 mov %g1, %psr ! restore status register and
   !  ENABLE TRAPS 
 
+/*
+ * WARNING: This code does not run with the restored stack pointer.  In
+ * SMP configurations, it uses a processor-specific stack.  In
+ * uniprocessor configurations, it uses the stack of the caller.  In
+ * this case, the caller shall ensure that it is not the interrupt
+ * stack (which is also the system initialization stack).
+ */
+
 ld  [%o1 + G5_OFFSET], %g5! restore the global registers
 ld  [%o1 + G7_OFFSET], %g7
 
@@ -266,7 +274,9 @@ done_flushing:
 ldd [%o1 + I4_OFFSET], %i4
 ldd [%o1 + I6_FP_OFFSET], %i6
 
-ldd [%o1 + O6_SP_OFFSET], %o6 ! restore the output registers
+ldd [%o1 + O6_SP_OFFSET], %o6 ! restore the non-volatile output
+  ! registers (stack pointer,
+  ! link register)
 
 jmp %o7 + 8   ! return
 nop   ! delay slot
@@ -325,6 +335,23 @@ SYM(_CPU_Context_restore):
 ba  SYM(_CPU_Context_restore_heir)
 mov %i0, %o1  ! in the delay slot
 
+#if !defined(RTEMS_SMP)
+.align 4
+PUBLIC(_SPARC_Start_multitasking)
+SYM(_SPARC_Start_multitasking):
+/*
+ * Restore the stack pointer right now, so that the window flushing and
+ * interrupts during _CPU_Context_restore_heir() use the stack of the
+ * heir thread.  This is crucial for the interrupt handling to prevent
+ * a concurrent use of the interrupt stack (which is also the system
+ * initialization stack).
+ */
+ld  [%o0 + O6_SP_OFFSET], %o6
+
+ba  SYM(_CPU_Context_restore)
+ nop
+#endif
+
 /*
  *  void _SPARC_Interrupt_trap()
  *
diff --git a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h 
b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
index 2021b108db..a21cef371f 100644
--- a/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc/include/rtems/score/cpu.h
@@ -993,6 +993,25 @@ RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
  */
 RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );
 
+#if !defined(RTEMS_SMP)
+/**
+ * @brief Starts multitasking in uniprocessor configura

[PATCH 2/2] cpukit/jffs2: Use unprotected chain calls

2023-09-20 Thread Kinsey Moore
Use unprotected chain calls for delayed write workqueues since these
calls are either protected by locks or only operate on local chains and
are never accessed from interrupt context.

Updates #4956
---
 cpukit/libfs/src/jffs2/src/fs-rtems.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/cpukit/libfs/src/jffs2/src/fs-rtems.c 
b/cpukit/libfs/src/jffs2/src/fs-rtems.c
index 36bada9a98..644c7385c5 100644
--- a/cpukit/libfs/src/jffs2/src/fs-rtems.c
+++ b/cpukit/libfs/src/jffs2/src/fs-rtems.c
@@ -1246,7 +1246,7 @@ void jffs2_queue_delayed_work(struct delayed_work *work, 
int delay_ms)
mutex_lock(&delayed_work_mutex);
if (rtems_chain_is_node_off_chain(&work->work.node)) {
work->execution_time = rtems_clock_get_uptime_nanoseconds() + 
delay_ms*100;
-   rtems_chain_append(&delayed_work_chain, &work->work.node);
+   rtems_chain_append_unprotected(&delayed_work_chain, 
&work->work.node);
}
mutex_unlock(&delayed_work_mutex);
 }
@@ -1267,7 +1267,7 @@ static void jffs2_remove_delayed_work(struct delayed_work 
*dwork)
work = (struct delayed_work*) node;
rtems_chain_node* next_node = rtems_chain_next(node);
if (work == dwork) {
-   rtems_chain_extract(node);
+   rtems_chain_extract_unprotected(node);
rtems_chain_set_off_chain(node);
mutex_unlock(&delayed_work_mutex);
return;
@@ -1296,8 +1296,8 @@ static void process_delayed_work(void)
work = (struct delayed_work*) node;
rtems_chain_node* next_node = rtems_chain_next(node);
if (rtems_clock_get_uptime_nanoseconds() >= 
work->execution_time) {
-   rtems_chain_extract(node);
-   rtems_chain_append(&process_work_chain, node);
+   rtems_chain_extract_unprotected(node);
+   rtems_chain_append_unprotected(&process_work_chain, 
node);
}
node = next_node;
}
@@ -1315,7 +1315,7 @@ static void process_delayed_work(void)
 #ifdef RTEMS_DEBUG
mutex_lock(&delayed_work_mutex);
 #endif
-   rtems_chain_extract(node);
+   rtems_chain_extract_unprotected(node);
 #ifdef RTEMS_DEBUG
node->next = node;
mutex_unlock(&delayed_work_mutex);
-- 
2.39.2

___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel


[PATCH 1/2] cpukit/jffs2: Set extracted nodes off-chain

2023-09-20 Thread Kinsey Moore
The code here was developed under RTEMS_DEBUG=true which automatically
sets nodes off-chain upon extraction. Extraction does not set nodes
off-chain when not running under RTEMS_DEBUG=true. This code relies on
this behavior, so set nodes off-chain as needed.

Updates #4956
---
 cpukit/libfs/src/jffs2/src/fs-rtems.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/cpukit/libfs/src/jffs2/src/fs-rtems.c 
b/cpukit/libfs/src/jffs2/src/fs-rtems.c
index 1a677c9772..36bada9a98 100644
--- a/cpukit/libfs/src/jffs2/src/fs-rtems.c
+++ b/cpukit/libfs/src/jffs2/src/fs-rtems.c
@@ -1268,6 +1268,7 @@ static void jffs2_remove_delayed_work(struct delayed_work 
*dwork)
rtems_chain_node* next_node = rtems_chain_next(node);
if (work == dwork) {
rtems_chain_extract(node);
+   rtems_chain_set_off_chain(node);
mutex_unlock(&delayed_work_mutex);
return;
}
@@ -1306,8 +1307,22 @@ static void process_delayed_work(void)
while (!rtems_chain_is_tail(&process_work_chain, node)) {
work = (struct delayed_work*) node;
rtems_chain_node* next_node = rtems_chain_next(node);
+
+   /*
+* Don't leave extracted node exposed to other operations
+* under RTEMS_DEBUG
+*/
+#ifdef RTEMS_DEBUG
+   mutex_lock(&delayed_work_mutex);
+#endif
rtems_chain_extract(node);
+#ifdef RTEMS_DEBUG
+   node->next = node;
+   mutex_unlock(&delayed_work_mutex);
+#endif
+
work->callback(&work->work);
+   rtems_chain_set_off_chain(node);
node = next_node;
}
 }
-- 
2.39.2

___
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel