Currently, the __sync_fetch_and_<op> and __sync_val_compare_and_swap
routines in libgcc's RISC-V atomic.c are not sufficiently strong for
the __ATOMIC_SEQ_CST memory model.
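
For illustration, the kind of call that reaches these routines is a
subword __sync operation, which GCC expands into the masked LR/SC loop
in libgcc/config/riscv/atomic.c on targets without subword AMOs. A
minimal sketch (function and variable names are hypothetical):

  #include <stdint.h>

  static uint16_t flag;

  uint16_t
  take_flag (uint16_t expected)
  {
    /* __sync builtins imply a full (sequentially consistent) barrier,
       so the LR/SC loop emitted for this subword CAS must also be
       SEQ_CST.  */
    return __sync_val_compare_and_swap (&flag, expected, 1);
  }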

This can be shown using the following Herd litmus test:

RISCV LRSC-LIB-CALL

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | lw x9,0(x10) ;
 sw x1,0(x6)         | fence rw,rw  ;
 lr.w.aq x7,0(x8)    | lw x5,0(x6)  ;
 ori x7,x0,1         |              ;
 sc.w.rl x3,x7,0(x8) |              ;
 sw x1,0(x10)        |              ;

exists (1:x9=1 /\ 1:x5=0 /\ b=1)

This patch enforces SEQ_CST by setting both the .aq and .rl bits
(.aqrl) on the LR and SC instructions.
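
For context, the litmus test above corresponds roughly to the following
C pattern (a sketch, not part of the patch; thread bodies and variable
names are hypothetical, with b made subword so that the access goes
through the libgcc routine):

  #include <stdint.h>

  static int a, c;
  static uint16_t b;

  /* P0: runs on one hart.  */
  void
  p0 (void)
  {
    a = 1;                       /* sw x1,0(x6)            */
    __sync_fetch_and_or (&b, 1); /* libgcc LR/SC loop on b */
    c = 1;                       /* sw x1,0(x10)           */
  }

  /* P1: runs on another hart.  */
  void
  p1 (void)
  {
    int x9 = c;                               /* lw x9,0(x10) */
    __atomic_thread_fence (__ATOMIC_SEQ_CST); /* fence rw,rw  */
    int x5 = a;                               /* lw x5,0(x6)  */
    /* Forbidden under SEQ_CST: x9 == 1 && x5 == 0 && b == 1.
       With lr.aq/sc.rl only, the store to c may become visible
       before the store to a, so this outcome is observable.  */
    (void) x9;
    (void) x5;
  }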

2022-03-07  Patrick O'Neill  <patr...@rivosinc.com>

        PR target/104831
        * atomic.c: Change LR.aq/SC.rl pairs into sequentially
        consistent LR.aqrl/SC.aqrl pairs.

Signed-off-by: Patrick O'Neill <patr...@rivosinc.com>
---
RISCV LRSC-BUGFIX

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                    | P1           ;
 ori x1,x0,1           | lw x9,0(x10) ;
 sw x1,0(x6)           | fence rw,rw  ;
 lr.w.aqrl x7,0(x8)    | lw x5,0(x6)  ;
 ori x7,x0,1           |              ;
 sc.w.aqrl x3,x7,0(x8) |              ;
 sw x1,0(x10)          |              ;

~exists (1:x9=1 /\ 1:x5=0 /\ b=1)
---
Below are additional Herd litmus tests used to validate the LR.aqrl/SC.aqrl fix.
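
These tests can be checked with herd7 from herdtools7 (assuming a
default install, which ships the RVWMO model); for example, after
saving a test as LRSC-READ.litmus:

  $ herd7 LRSC-READ.litmus

herd7 then reports whether the exists clause is reachable under the
model.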
---
RISCV LRSC-READ

(*
  LR/SC with the .aq/.rl bits set does not allow later read operations
  to be reordered into or above it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                     | P1          ;
 lr.w.aq.rl x7,0(x8)    | ori x1,x0,1 ;
 ori x7,x0,1            | sw x1,0(x6) ;
 sc.w.aq.rl x1,x7,0(x8) | fence rw,rw ;
 lw x5,0(x6)            | lw x7,0(x8) ;

~exists (0:x5=0 /\ 1:x7=0 /\ b=1)
---
RISCV READ-LRSC

(*
  LR/SC with the .aq/.rl bits set does not allow earlier read operations
  to be reordered into or below it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                     | P1          ;
 lw x5,0(x6)            | ori x1,x0,1 ;
 lr.w.aq.rl x7,0(x8)    | sw x1,0(x8) ;
 ori x1,x0,1            | fence rw,rw ;
 sc.w.aq.rl x1,x1,0(x8) | sw x1,0(x6) ;

~exists (0:x5=1 /\ 0:x7=0 /\ b=1)
---
RISCV LRSC-WRITE

(*
  LR/SC with the .aq/.rl bits set does not allow later write operations
  to be reordered into or above it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                     | P1          ;
 ori x9,x0,1            | lw x9,0(x10) ;
 lr.w.aq.rl x7,0(x8)    | fence rw,rw ;
 ori x7,x0,1            | lw x7,0(x8) ;
 sc.w.aq.rl x1,x7,0(x8) |             ;
 sw x9,0(x10)           |             ;

~exists (1:x9=1 /\ 1:x7=0 /\ b=1)
---
RISCV WRITE-LRSC

(*
  LR/SC with the .aq/.rl bits set does not allow earlier write operations
  to be reordered into or below it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                     | P1           ;
 ori x1,x0,1            | ori x1,x0,1  ;
 sw x1,0(x10)           | sw x1,0(x8)  ;
 lr.w.aq.rl x7,0(x8)    | fence rw,rw  ;
 sc.w.aq.rl x1,x1,0(x8) | lw x9,0(x10) ;

~exists (0:x7=0 /\ 1:x9=0 /\ b=1)
---
 libgcc/config/riscv/atomic.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/libgcc/config/riscv/atomic.c b/libgcc/config/riscv/atomic.c
index 7007e7a20e4..0c85a6d00ea 100644
--- a/libgcc/config/riscv/atomic.c
+++ b/libgcc/config/riscv/atomic.c
@@ -39,13 +39,13 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1, tmp2;                                          \
                                                                        \
     asm volatile ("1:\n\t"                                             \
-                 "lr.w.aq %[old], %[mem]\n\t"                          \
+                 "lr.w.aqrl %[old], %[mem]\n\t"                        \
                  #insn " %[tmp1], %[old], %[value]\n\t"                \
                  invert                                                \
                  "and %[tmp1], %[tmp1], %[mask]\n\t"                   \
                  "and %[tmp2], %[old], %[not_mask]\n\t"                \
                  "or %[tmp2], %[tmp2], %[tmp1]\n\t"                    \
-                 "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"                \
+                 "sc.w.aqrl %[tmp1], %[tmp2], %[mem]\n\t"              \
                  "bnez %[tmp1], 1b"                                    \
                  : [old] "=&r" (old),                                  \
                    [mem] "+A" (*(volatile unsigned*) aligned_addr),    \
@@ -73,12 +73,12 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    unsigned old, tmp1;                                                \
                                                                        \
     asm volatile ("1:\n\t"                                             \
-                 "lr.w.aq %[old], %[mem]\n\t"                          \
+                 "lr.w.aqrl %[old], %[mem]\n\t"                        \
                  "and %[tmp1], %[old], %[mask]\n\t"                    \
                  "bne %[tmp1], %[o], 1f\n\t"                           \
                  "and %[tmp1], %[old], %[not_mask]\n\t"                \
                  "or %[tmp1], %[tmp1], %[n]\n\t"                       \
-                 "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"                \
+                 "sc.w.aqrl %[tmp1], %[tmp1], %[mem]\n\t"              \
                  "bnez %[tmp1], 1b\n\t"                                \
                  "1:"                                                  \
                  : [old] "=&r" (old),                                  \
-- 
2.25.1
