---
 .../bspsupport/ppc_exc_async_normal.S              | 25 ++++++++++++++++++++
 cpukit/score/cpu/arm/arm_exc_interrupt.S           | 27 ++++++++++++++++++++++
 2 files changed, 52 insertions(+)

diff --git a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
index 59e621f..6a1ea34 100644
--- a/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
+++ b/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_async_normal.S
@@ -423,6 +423,31 @@ thread_dispatching_done:
 #endif
        PPC_GPR_LOAD    HANDLER_REGISTER, HANDLER_OFFSET(r1)
 
+       /*
+        * We must clear reservations here, since otherwise compare-and-swap
+        * atomic operations with interrupts enabled may yield wrong results.
+        * A compare-and-swap atomic operation is generated by the compiler
+        * like this:
+        *
+        *   .L1:
+        *     lwarx  r9, r0, r3
+        *     cmpw   r9, r4
+        *     bne-   .L2
+        *     stwcx. r5, r0, r3
+        *     bne-   .L1
+        *   .L2:
+        *
+        * Consider the following scenario.  A thread is interrupted right
+        * before the stwcx.  The interrupt updates the value using a
+        * compare-and-swap sequence.  Everything is fine up to this point.
+        * The interrupt now performs a second compare-and-swap sequence
+        * which fails with a branch to .L2 and leaves a reservation behind.
+        * The interrupt returns without a further stwcx.  The thread's
+        * stwcx. then wrongly succeeds using that unrelated reservation.
+        */
+       li      SCRATCH_0_REGISTER, HANDLER_OFFSET
+       stwcx.  SCRATCH_0_REGISTER, r1, SCRATCH_0_REGISTER
+
        /* Restore SRR0, SRR1, CR, CTR, XER, and LR */
        mtsrr0  SCRATCH_0_REGISTER
        PPC_GPR_LOAD    SCRATCH_0_REGISTER, SCRATCH_0_OFFSET(r1)
diff --git a/cpukit/score/cpu/arm/arm_exc_interrupt.S b/cpukit/score/cpu/arm/arm_exc_interrupt.S
index 7930c32..fcb1510 100644
--- a/cpukit/score/cpu/arm/arm_exc_interrupt.S
+++ b/cpukit/score/cpu/arm/arm_exc_interrupt.S
@@ -209,6 +209,33 @@ thread_dispatch_done:
        /* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
        ldmia   sp!, {EXCHANGE_LR, EXCHANGE_SPSR}
 
+#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
+       /*
+        * We must clear reservations here, since otherwise compare-and-swap
+        * atomic operations with interrupts enabled may yield wrong results.
+        * A compare-and-swap atomic operation is generated by the compiler
+        * like this:
+        *
+        *   .L1:
+        *     ldrex r1, [r0]
+        *     cmp   r1, r3
+        *     bne   .L2
+        *     strex r3, r2, [r0]
+        *     cmp   r3, #0
+        *     bne   .L1
+        *   .L2:
+        *
+        * Consider the following scenario.  A thread is interrupted right
+        * before the strex.  The interrupt updates the value using a
+        * compare-and-swap sequence.  Everything is fine up to this point.
+        * The interrupt now performs a second compare-and-swap sequence
+        * which fails with a branch to .L2 and leaves a reservation behind.
+        * The interrupt returns without a further strex.  The thread's
+        * strex then wrongly succeeds using that unrelated reservation.
+        */
+       clrex
+#endif
+
        /* Return from interrupt */
        subs    pc, lr, #4
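
For reference, a minimal C-level sketch of where such sequences come
from.  With GCC, a compare-and-swap via the __atomic_compare_exchange_n
builtin typically expands to the lwarx/stwcx. and ldrex/strex loops
quoted in the comments above; the function name is illustrative and the
exact instruction sequence depends on the compiler version and target
options.

  #include <stdbool.h>
  #include <stdint.h>

  /*
   * Compare-and-swap via the GCC atomic builtin.  On PowerPC this
   * typically compiles to a lwarx/stwcx. loop and on ARM to a
   * ldrex/strex loop, as quoted in the comments above.
   */
  static bool compare_and_swap(
    volatile uint32_t *address,
    uint32_t           expected,
    uint32_t           desired
  )
  {
    return __atomic_compare_exchange_n(
      address,
      &expected,
      desired,
      false,             /* strong variant, retries internally */
      __ATOMIC_RELAXED,  /* memory order on success */
      __ATOMIC_RELAXED   /* memory order on failure */
    );
  }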
 
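To see why the stale reservation matters, here is a sketch of the
scenario from the comments, built on the hypothetical compare_and_swap()
above.  All names (counter, flag, counter_increment, interrupt_handler)
are made up for illustration.

  static volatile uint32_t counter; /* updated by thread and interrupt */
  static volatile uint32_t flag;    /* assume it is already set to 1 */

  /* Thread context: may be interrupted right before the
   * store-conditional inside the compare-and-swap */
  void counter_increment( void )
  {
    uint32_t old;

    do {
      old = counter;
    } while ( !compare_and_swap( &counter, old, old + 1 ) );
  }

  /* Interrupt context */
  void interrupt_handler( void )
  {
    /* First compare-and-swap sequence: succeeds, so its own
     * store-conditional consumes the reservation it acquired */
    counter_increment();

    /* Second compare-and-swap sequence: the flag is already 1, so the
     * compare fails right after the load-exclusive and the code
     * branches to .L2, leaving a dangling reservation */
    compare_and_swap( &flag, 0, 1 );
  }

If the interrupt hits the thread between its load-exclusive and its
store-conditional, the thread resumes with the interrupt's dangling
reservation still in place.  Its store-conditional succeeds, storing
old + 1 computed from a now stale old, and the interrupt's increment is
lost.  Clearing the reservation on interrupt return (the dummy stwcx.
on PowerPC, clrex on ARM) closes this window.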
-- 
1.8.4.5
