Currently, the __sync_fetch_and_<op> and __sync_val_compare_and_swap
routines in libgcc/config/riscv/atomic.c are not sufficiently strong for
the SEQ_CST (__ATOMIC_SEQ_CST) memory model.
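
For reference, a minimal caller that ends up in these routines could look
like the sketch below.  This is illustrative only and not taken from the
PR: with 1-byte operands, GCC on RISC-V typically emits calls to the
out-of-line subword helpers (e.g. __sync_fetch_and_add_1 and
__sync_val_compare_and_swap_1) that libgcc/config/riscv/atomic.c provides,
and the __sync builtins are documented as full barriers, i.e. sequentially
consistent.

/* Illustrative sketch only -- not part of the patch.  */
#include <stdint.h>

uint8_t counter;
uint8_t state;

uint8_t
bump_counter (void)
{
  /* Subword operand: compiles to a call to __sync_fetch_and_add_1,
     implemented with an LR/SC loop in atomic.c.  */
  return __sync_fetch_and_add (&counter, 1);
}

uint8_t
try_advance (uint8_t expected, uint8_t desired)
{
  /* Subword operand: compiles to a call to __sync_val_compare_and_swap_1.  */
  return __sync_val_compare_and_swap (&state, expected, desired);
}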

This can be shown using the following Herd litmus test:

RISCV LRSC-LIB-CALL

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | lw x7,0(x8)  ;
 sw x1,0(x10)        | fence rw,rw  ;
 lr.w.aq x7,0(x8)    | lw x5,0(x6)  ;
 ori x7,x0,1         |              ;
 sc.w.rl x3,x7,0(x8) |              ;
 fence rw,w          |              ;
 sw x1,0(x6)         |              ;

exists (1:x7=1 /\ 1:x5=0 /\ b=1)

This patch enforces SEQ_CST by strengthening the LR ops from .aq to .aqrl;
the SC ops keep their existing .rl bit, giving LR.aqrl/SC.rl pairs.
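
For a word-sized operand, the strengthened retry loop has roughly the shape
sketched below.  This is a simplified illustration rather than the exact
code from atomic.c (the real helpers also shift and mask the 1- or 2-byte
value within its enclosing aligned word), and fetch_and_add_w is a
hypothetical name used only for this example.

/* Simplified sketch of the LR.aqrl/SC.rl retry loop after the fix.  */
static inline unsigned
fetch_and_add_w (unsigned *mem, unsigned value)
{
  unsigned old, tmp;

  asm volatile ("1:\n\t"
                /* .aqrl orders the LR after all earlier accesses and
                   before all later ones.  */
                "lr.w.aqrl %[old], %[mem]\n\t"
                "add %[tmp], %[old], %[value]\n\t"
                /* .rl keeps earlier accesses from sinking below the SC;
                   %[tmp] is set to 0 on success, nonzero on failure.  */
                "sc.w.rl %[tmp], %[tmp], %[mem]\n\t"
                "bnez %[tmp], 1b"
                : [old] "=&r" (old), [tmp] "=&r" (tmp), [mem] "+A" (*mem)
                : [value] "r" (value)
                : "memory");
  return old;
}

This LR.aqrl/SC.rl pairing is the ISA manual's suggested mapping for a
sequentially consistent read-modify-write.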

2022-03-07  Patrick O'Neill  <patr...@rivosinc.com>

        PR target/104831
        * config/riscv/atomic.c: Change LR.aq/SC.rl pairs into
        sequentially consistent LR.aqrl/SC.rl pairs.

Signed-off-by: Patrick O'Neill <patr...@rivosinc.com>
---
Changelog v2:
 - Weakened LR/SC pairs to be in line with the ISA manual.
 - Updated litmus tests to reflect the relevant leading fences present
   in the RISC-V implementation.
---
RISCV LRSC-BUGFIX

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | lw x7,0(x8)  ;
 sw x1,0(x10)        | fence rw,rw  ;
 lr.w.aq.rl x7,0(x8) | lw x5,0(x6)  ;
 ori x7,x0,1         |              ;
 sc.w.rl x3,x7,0(x8) |              ;
 fence rw,w          |              ;
 sw x1,0(x6)         |              ;

~exists (1:x7=1 /\ 1:x5=0 /\ b=1)
---
Below are other Herd litmus tests used to test the LR.aqrl/SC.rl fix.
---
RISCV LRSC-READ

(*
  An LR/SC pair with the .aq/.rl bits set does not allow read operations
  to be reordered into or above it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                  | P1          ;
 lr.w.aq.rl x7,0(x8) | ori x1,x0,1 ;
 ori x7,x0,1         | sw x1,0(x6) ;
 sc.w.rl x1,x7,0(x8) | fence rw,rw ;
 fence rw,rw         | lw x7,0(x8) ;
 lw x5,0(x6)         |             ;

~exists (0:x5=0 /\ 1:x7=0 /\ b=1)
---
RISCV READ-LRSC

(*
  An LR/SC pair with the .aq/.rl bits set does not allow read operations
  to be reordered into or below it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                  | P1          ;
 lw x5,0(x6)         | ori x1,x0,1 ;
 lr.w.aq.rl x7,0(x8) | sw x1,0(x8) ;
 ori x1,x0,1         | fence rw,rw ;
 sc.w.rl x1,x1,0(x8) | sw x1,0(x6) ;

~exists (0:x5=1 /\ 0:x7=0 /\ b=1)
---
RISCV LRSC-WRITE

(* 
  An LR/SC pair with the .aq/.rl bits set does not allow write operations
  to be reordered into or above it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                  | P1          ;
 ori x9,x0,1         | lw x9,0(x10);
 lr.w.aq.rl x7,0(x8) | fence rw,rw ;
 ori x7,x0,1         | lw x7,0(x8) ;
 sc.w.rl x1,x7,0(x8) |             ;
 fence rw,w          |             ;
 sw x9,0(x10)        |             ;

~exists (1:x9=1 /\ 1:x7=0 /\ b=1)
---
RISCV WRITE-LRSC

(*
  An LR/SC pair with the .aq/.rl bits set does not allow write operations
  to be reordered into or below it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | ori x1,x0,1  ;
 sw x1,0(x10)        | sw x1,0(x8)  ;
 lr.w.aq.rl x7,0(x8) | fence rw,rw  ;
 sc.w.rl x1,x1,0(x8) | lw x9,0(x10) ;

~exists (0:x7=0 /\ 1:x9=0 /\ b=1)
---
 libgcc/config/riscv/atomic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libgcc/config/riscv/atomic.c b/libgcc/config/riscv/atomic.c
index 7007e7a20e4..fa0e428963f 100644
--- a/libgcc/config/riscv/atomic.c
+++ b/libgcc/config/riscv/atomic.c
@@ -39,7 +39,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1, tmp2;                                          \
                                                                        \
     asm volatile ("1:\n\t"                                             \
-                 "lr.w.aq %[old], %[mem]\n\t"                          \
+                 "lr.w.aqrl %[old], %[mem]\n\t"                        \
                  #insn " %[tmp1], %[old], %[value]\n\t"                \
                  invert                                                \
                  "and %[tmp1], %[tmp1], %[mask]\n\t"                   \
@@ -73,7 +73,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1;                                                \
                                                                        \
     asm volatile ("1:\n\t"                                             \
-                 "lr.w.aq %[old], %[mem]\n\t"                          \
+                 "lr.w.aqrl %[old], %[mem]\n\t"                        \
                  "and %[tmp1], %[old], %[mask]\n\t"                    \
                  "bne %[tmp1], %[o], 1f\n\t"                           \
                  "and %[tmp1], %[old], %[not_mask]\n\t"                \
-- 
2.25.1
