Add 4 litmus tests for BPF ringbuf implementation, divided into two different
use cases.

First, two unbounded cases: one with 1 producer and another with
2 producers, single consumer. All reservations are supposed to succeed.

Second, a bounded case with only 1 record allowed in the ring buffer at any given
time. Here failures to reserve space are expected. Again, 1- and 2-producer
cases, single consumer, are validated.

Just for the fun of it, I also wrote a 3-producer case; it took *16 hours* to
validate, but came back successful as well. I'm not including it in this
patch, because it's not practical to run it. See output for all included
4 cases and the one 3-producer case with the bounded use case.

Each litmus test implements the producer/consumer protocol for the BPF ring buffer
implementation found in kernel/bpf/ringbuf.c. Due to limitations, all records
are assumed equal-sized and producer/consumer counters are incremented by 1.
This doesn't change the correctness of the algorithm, though.

Verification results:

$ herd7 -unroll 0 -conf linux-kernel.cfg mpsc-rb+1p1c+bounded.litmus
Test mpsc-rb+1p1c+bounded Allowed
States 2
0:rFail=0; 1:rFail=0; cx=0; dropped=0; len1=1; px=1;
0:rFail=0; 1:rFail=0; cx=1; dropped=0; len1=1; px=1;
Ok
Witnesses
Positive: 3 Negative: 0
Condition exists (0:rFail=0 /\ 1:rFail=0 /\ dropped=0 /\ px=1 /\ len1=1 /\ 
(cx=0 \/ cx=1))
Observation mpsc-rb+1p1c+bounded Always 3 0
Time mpsc-rb+1p1c+bounded 0.03
Hash=554e6af9bfde3d1bfbb2c07bb0e283ad

$ herd7 -unroll 0 -conf linux-kernel.cfg mpsc-rb+2p1c+bounded.litmus
Test mpsc-rb+2p1c+bounded Allowed
States 4
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=0; dropped=1; len1=1; px=1;
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=1; dropped=0; len1=1; px=2;
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=1; dropped=1; len1=1; px=1;
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=2; dropped=0; len1=1; px=2;
Ok
Witnesses
Positive: 40 Negative: 0
Condition exists (0:rFail=0 /\ 1:rFail=0 /\ 2:rFail=0 /\ len1=1 /\ (dropped=0 
/\ px=2 /\ (cx=1 \/ cx=2) \/ dropped=1 /\ px=1 /\ (cx=0 \/ cx=1)))
Observation mpsc-rb+2p1c+bounded Always 40 0
Time mpsc-rb+2p1c+bounded 114.32
Hash=fa7c38edbf482a6605d6b2031c310bdc

$ herd7 -unroll 0 -conf linux-kernel.cfg mpsc-rb+1p1c+unbound.litmus
Test mpsc-rb+1p1c+unbound Allowed
States 2
0:rFail=0; 1:rFail=0; cx=0; len1=1; px=1;
0:rFail=0; 1:rFail=0; cx=1; len1=1; px=1;
Ok
Witnesses
Positive: 4 Negative: 0
Condition exists (0:rFail=0 /\ 1:rFail=0 /\ px=1 /\ len1=1 /\ (cx=0 \/ cx=1))
Observation mpsc-rb+1p1c+unbound Always 4 0
Time mpsc-rb+1p1c+unbound 0.02
Hash=0798c82c96356e6eb25edbcd8561dfcf

$ herd7 -unroll 0 -conf linux-kernel.cfg mpsc-rb+2p1c+unbound.litmus
Test mpsc-rb+2p1c+unbound Allowed
States 3
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=0; len1=1; len2=1; px=2;
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=1; len1=1; len2=1; px=2;
0:rFail=0; 1:rFail=0; 2:rFail=0; cx=2; len1=1; len2=1; px=2;
Ok
Witnesses
Positive: 78 Negative: 0
Condition exists (0:rFail=0 /\ 1:rFail=0 /\ 2:rFail=0 /\ px=2 /\ len1=1 /\ 
len2=1 /\ (cx=0 \/ cx=1 \/ cx=2))
Observation mpsc-rb+2p1c+unbound Always 78 0
Time mpsc-rb+2p1c+unbound 37.85
Hash=a1a73c1810ff3eb91f0d054f232399a1

$ herd7 -unroll 0 -conf linux-kernel.cfg mpsc-rb+3p1c+bounded.litmus
Test mpsc+ringbuf-spinlock Allowed
States 5
0:rFail=0; 1:rFail=0; 2:rFail=0; 3:rFail=0; cx=0; len1=1; len2=1; px=2;
0:rFail=0; 1:rFail=0; 2:rFail=0; 3:rFail=0; cx=1; len1=1; len2=1; px=2;
0:rFail=0; 1:rFail=0; 2:rFail=0; 3:rFail=0; cx=1; len1=1; len2=1; px=3;
0:rFail=0; 1:rFail=0; 2:rFail=0; 3:rFail=0; cx=2; len1=1; len2=1; px=2;
0:rFail=0; 1:rFail=0; 2:rFail=0; 3:rFail=0; cx=2; len1=1; len2=1; px=3;
Ok
Witnesses
Positive: 558 Negative: 0
Condition exists (0:rFail=0 /\ 1:rFail=0 /\ 2:rFail=0 /\ 3:rFail=0 /\ len1=1 /\ 
len2=1 /\ (px=2 /\ (cx=0 \/ cx=1 \/ cx=2) \/ px=3 /\ (cx=1 \/ cx=2)))
Observation mpsc+ringbuf-spinlock Always 558 0
Time mpsc+ringbuf-spinlock 57487.24
Hash=133977dba930d167b4e1b4a6923d5687

Cc: Paul E. McKenney <paul...@kernel.org>
Signed-off-by: Andrii Nakryiko <andr...@fb.com>
---
 .../litmus-tests/mpsc-rb+1p1c+bounded.litmus  |  92 +++++++++++
 .../litmus-tests/mpsc-rb+1p1c+unbound.litmus  |  83 ++++++++++
 .../litmus-tests/mpsc-rb+2p1c+bounded.litmus  | 152 ++++++++++++++++++
 .../litmus-tests/mpsc-rb+2p1c+unbound.litmus  | 137 ++++++++++++++++
 4 files changed, 464 insertions(+)
 create mode 100644 tools/memory-model/litmus-tests/mpsc-rb+1p1c+bounded.litmus
 create mode 100644 tools/memory-model/litmus-tests/mpsc-rb+1p1c+unbound.litmus
 create mode 100644 tools/memory-model/litmus-tests/mpsc-rb+2p1c+bounded.litmus
 create mode 100644 tools/memory-model/litmus-tests/mpsc-rb+2p1c+unbound.litmus

diff --git a/tools/memory-model/litmus-tests/mpsc-rb+1p1c+bounded.litmus 
b/tools/memory-model/litmus-tests/mpsc-rb+1p1c+bounded.litmus
new file mode 100644
index 000000000000..c97b2e1b56f6
--- /dev/null
+++ b/tools/memory-model/litmus-tests/mpsc-rb+1p1c+bounded.litmus
@@ -0,0 +1,92 @@
+C mpsc-rb+1p1c+bounded
+
+(*
+ * Result: Always
+ *
+ * This litmus test validates BPF ring buffer implementation under the
+ * following assumptions:
+ * - 1 producer;
+ * - 1 consumer;
+ * - ring buffer has capacity for only 1 record.
+ *
+ * Expectations:
+ * - 1 record pushed into ring buffer;
+ * - 0 or 1 element is consumed.
+ * - no failures.
+ *)
+
+{
+       max_len = 1;
+       len1 = 0;
+       px = 0;
+       cx = 0;
+       dropped = 0;
+}
+
+P0(int *len1, int *cx, int *px)
+{
+       int *rLenPtr;
+       int rLen;
+       int rPx;
+       int rCx;
+       int rFail;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       rPx = smp_load_acquire(px);
+       if (rCx < rPx) {
+               if (rCx == 0)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               rLen = READ_ONCE(*rLenPtr);
+               if (rLen == 0) {
+                       rFail = 1;
+               } else if (rLen == 1) {
+                       rCx = rCx + 1;
+                       WRITE_ONCE(*cx, rCx);
+               }
+       }
+}
+
+P1(int *len1, spinlock_t *rb_lock, int *px, int *cx, int *dropped, int 
*max_len)
+{
+       int rPx;
+       int rCx;
+       int rFail;
+       int *rLenPtr;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       spin_lock(rb_lock);
+
+       rPx = *px;
+       if (rPx - rCx >= *max_len) {
+               atomic_inc(dropped);
+               spin_unlock(rb_lock);
+       } else {
+               if (rPx == 0)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               WRITE_ONCE(*rLenPtr, -1);
+               smp_wmb();
+               WRITE_ONCE(*px, rPx + 1);
+
+               spin_unlock(rb_lock);
+
+               WRITE_ONCE(*rLenPtr, 1);
+       }
+}
+
+exists (
+       0:rFail=0 /\ 1:rFail=0
+       /\
+       (
+               (dropped=0 /\ px=1 /\ len1=1 /\ (cx=0 \/ cx=1))
+       )
+)
diff --git a/tools/memory-model/litmus-tests/mpsc-rb+1p1c+unbound.litmus 
b/tools/memory-model/litmus-tests/mpsc-rb+1p1c+unbound.litmus
new file mode 100644
index 000000000000..b1e25ec6275e
--- /dev/null
+++ b/tools/memory-model/litmus-tests/mpsc-rb+1p1c+unbound.litmus
@@ -0,0 +1,83 @@
+C mpsc-rb+1p1c+unbound
+
+(*
+ * Result: Always
+ *
+ * This litmus test validates BPF ring buffer implementation under the
+ * following assumptions:
+ * - 1 producer;
+ * - 1 consumer;
+ * - ring buffer capacity is unbounded.
+ *
+ * Expectations:
+ * - 1 record pushed into ring buffer;
+ * - 0 or 1 element is consumed.
+ * - no failures.
+ *)
+
+{
+       len1 = 0;
+       px = 0;
+       cx = 0;
+}
+
+P0(int *len1, int *cx, int *px)
+{
+       int *rLenPtr;
+       int rLen;
+       int rPx;
+       int rCx;
+       int rFail;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       rPx = smp_load_acquire(px);
+       if (rCx < rPx) {
+               if (rCx == 0)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               rLen = READ_ONCE(*rLenPtr);
+               if (rLen == 0) {
+                       rFail = 1;
+               } else if (rLen == 1) {
+                       rCx = rCx + 1;
+                       WRITE_ONCE(*cx, rCx);
+               }
+       }
+}
+
+P1(int *len1, spinlock_t *rb_lock, int *px, int *cx)
+{
+       int rPx;
+       int rCx;
+       int rFail;
+       int *rLenPtr;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       spin_lock(rb_lock);
+
+       rPx = *px;
+       if (rPx == 0)
+               rLenPtr = len1;
+       else
+               rFail = 1;
+
+       WRITE_ONCE(*rLenPtr, -1);
+       smp_wmb();
+       WRITE_ONCE(*px, rPx + 1);
+
+       spin_unlock(rb_lock);
+
+       WRITE_ONCE(*rLenPtr, 1);
+}
+
+exists (
+       0:rFail=0 /\ 1:rFail=0
+       /\ px=1 /\ len1=1
+       /\ (cx=0 \/ cx=1)
+)
diff --git a/tools/memory-model/litmus-tests/mpsc-rb+2p1c+bounded.litmus 
b/tools/memory-model/litmus-tests/mpsc-rb+2p1c+bounded.litmus
new file mode 100644
index 000000000000..0707aa9ad307
--- /dev/null
+++ b/tools/memory-model/litmus-tests/mpsc-rb+2p1c+bounded.litmus
@@ -0,0 +1,152 @@
+C mpsc-rb+2p1c+bounded
+
+(*
+ * Result: Always
+ *
+ * This litmus test validates BPF ring buffer implementation under the
+ * following assumptions:
+ * - 2 identical producers;
+ * - 1 consumer;
+ * - ring buffer has capacity for only 1 record.
+ *
+ * Expectations:
+ * - either 1 or 2 records are pushed into ring buffer;
+ * - 0, 1, or 2 elements are consumed by consumer;
+ * - appropriate number of dropped records is recorded to satisfy ring buffer
+ *   size bounds;
+ * - no failures.
+ *)
+
+{
+       max_len = 1;
+       len1 = 0;
+       px = 0;
+       cx = 0;
+       dropped = 0;
+}
+
+P0(int *len1, int *cx, int *px)
+{
+       int *rLenPtr;
+       int rLen;
+       int rPx;
+       int rCx;
+       int rFail;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       rPx = smp_load_acquire(px);
+       if (rCx < rPx) {
+               if (rCx == 0)
+                       rLenPtr = len1;
+               else if (rCx == 1)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               rLen = READ_ONCE(*rLenPtr);
+               if (rLen == 0) {
+                       rFail = 1;
+               } else if (rLen == 1) {
+                       rCx = rCx + 1;
+                       WRITE_ONCE(*cx, rCx);
+               }
+       }
+
+       rPx = smp_load_acquire(px);
+       if (rCx < rPx) {
+               if (rCx == 0)
+                       rLenPtr = len1;
+               else if (rCx == 1)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               rLen = READ_ONCE(*rLenPtr);
+               if (rLen == 0) {
+                       rFail = 1;
+               } else if (rLen == 1) {
+                       rCx = rCx + 1;
+                       WRITE_ONCE(*cx, rCx);
+               }
+       }
+}
+
+P1(int *len1, spinlock_t *rb_lock, int *px, int *cx, int *dropped, int 
*max_len)
+{
+       int rPx;
+       int rCx;
+       int rFail;
+       int *rLenPtr;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       spin_lock(rb_lock);
+
+       rPx = *px;
+       if (rPx - rCx >= *max_len) {
+               atomic_inc(dropped);
+               spin_unlock(rb_lock);
+       } else {
+               if (rPx == 0)
+                       rLenPtr = len1;
+               else if (rPx == 1)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               WRITE_ONCE(*rLenPtr, -1);
+               smp_wmb();
+               WRITE_ONCE(*px, rPx + 1);
+
+               spin_unlock(rb_lock);
+
+               WRITE_ONCE(*rLenPtr, 1);
+       }
+}
+
+P2(int *len1, spinlock_t *rb_lock, int *px, int *cx, int *dropped, int 
*max_len)
+{
+       int rPx;
+       int rCx;
+       int rFail;
+       int *rLenPtr;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       spin_lock(rb_lock);
+
+       rPx = *px;
+       if (rPx - rCx >= *max_len) {
+               atomic_inc(dropped);
+               spin_unlock(rb_lock);
+       } else {
+               if (rPx == 0)
+                       rLenPtr = len1;
+               else if (rPx == 1)
+                       rLenPtr = len1;
+               else
+                       rFail = 1;
+
+               WRITE_ONCE(*rLenPtr, -1);
+               smp_wmb();
+               WRITE_ONCE(*px, rPx + 1);
+
+               spin_unlock(rb_lock);
+
+               WRITE_ONCE(*rLenPtr, 1);
+       }
+}
+
+exists (
+       0:rFail=0 /\ 1:rFail=0 /\ 2:rFail=0 /\ len1=1
+       /\
+       (
+               (dropped = 0 /\ px=2 /\ (cx=1 \/ cx=2))
+               \/
+               (dropped = 1 /\ px=1 /\ (cx=0 \/ cx=1))
+       )
+)
diff --git a/tools/memory-model/litmus-tests/mpsc-rb+2p1c+unbound.litmus 
b/tools/memory-model/litmus-tests/mpsc-rb+2p1c+unbound.litmus
new file mode 100644
index 000000000000..f334ffece324
--- /dev/null
+++ b/tools/memory-model/litmus-tests/mpsc-rb+2p1c+unbound.litmus
@@ -0,0 +1,137 @@
+C mpsc-rb+2p1c+unbound
+
+(*
+ * Result: Always
+ *
+ * This litmus test validates BPF ring buffer implementation under the
+ * following assumptions:
+ * - 2 identical producers;
+ * - 1 consumer;
+ * - ring buffer capacity is unbounded.
+ *
+ * Expectations:
+ * - 2 records pushed into ring buffer;
+ * - 0, 1, or 2 elements are consumed.
+ * - no failures.
+ *)
+
+{
+       len1 = 0;
+       len2 = 0;
+       px = 0;
+       cx = 0;
+}
+
+P0(int *len1, int *len2, int *cx, int *px)
+{
+       int *rLenPtr;
+       int rLen;
+       int rPx;
+       int rCx;
+       int rFail;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       rPx = smp_load_acquire(px);
+       if (rCx < rPx) {
+               if (rCx == 0)
+                       rLenPtr = len1;
+               else if (rCx == 1)
+                       rLenPtr = len2;
+               else
+                       rFail = 1;
+
+               rLen = READ_ONCE(*rLenPtr);
+               if (rLen == 0) {
+                       rFail = 1;
+               } else if (rLen == 1) {
+                       rCx = rCx + 1;
+                       WRITE_ONCE(*cx, rCx);
+               }
+       }
+
+       rPx = smp_load_acquire(px);
+       if (rCx < rPx) {
+               if (rCx == 0)
+                       rLenPtr = len1;
+               else if (rCx == 1)
+                       rLenPtr = len2;
+               else
+                       rFail = 1;
+
+               rLen = READ_ONCE(*rLenPtr);
+               if (rLen == 0) {
+                       rFail = 1;
+               } else if (rLen == 1) {
+                       rCx = rCx + 1;
+                       WRITE_ONCE(*cx, rCx);
+               }
+       }
+}
+
+P1(int *len1, int *len2, spinlock_t *rb_lock, int *px, int *cx)
+{
+       int rPx;
+       int rCx;
+       int rFail;
+       int *rLenPtr;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       spin_lock(rb_lock);
+
+       rPx = *px;
+       if (rPx == 0)
+               rLenPtr = len1;
+       else if (rPx == 1)
+               rLenPtr = len2;
+       else
+               rFail = 1;
+
+       WRITE_ONCE(*rLenPtr, -1);
+       smp_wmb();
+       WRITE_ONCE(*px, rPx + 1);
+
+       spin_unlock(rb_lock);
+
+       WRITE_ONCE(*rLenPtr, 1);
+}
+
+P2(int *len1, int *len2, spinlock_t *rb_lock, int *px, int *cx)
+{
+       int rPx;
+       int rCx;
+       int rFail;
+       int *rLenPtr;
+
+       rFail = 0;
+       rCx = READ_ONCE(*cx);
+
+       spin_lock(rb_lock);
+
+       rPx = *px;
+       if (rPx == 0)
+               rLenPtr = len1;
+       else if (rPx == 1)
+               rLenPtr = len2;
+       else
+               rFail = 1;
+
+       WRITE_ONCE(*rLenPtr, -1);
+       smp_wmb();
+       WRITE_ONCE(*px, rPx + 1);
+
+       spin_unlock(rb_lock);
+
+       WRITE_ONCE(*rLenPtr, 1);
+}
+
+exists (
+       0:rFail=0 /\ 1:rFail=0 /\ 2:rFail=0
+       /\
+       px=2 /\ len1=1 /\ len2=1
+       /\
+       (cx=0 \/ cx=1 \/ cx=2)
+)
-- 
2.24.1

Reply via email to