Add support for Clang's context analysis for SRCU.
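
Marking srcu_struct as a reentrant context lock lets Clang check that
SRCU read-side critical sections are balanced and that SRCU-protected
pointers are only dereferenced while the corresponding srcu_struct is
held. A minimal sketch of the usage pattern covered by the analysis
(the struct and function names below are illustrative only, mirroring
the test added to lib/test_context-analysis.c):

    struct foo {
            struct srcu_struct srcu;
            long __rcu_guarded *data;
    };

    /* Reader: dereferencing data must happen under srcu. */
    static void reader(struct foo *f)
    {
            int idx = srcu_read_lock(&f->srcu);
            long *p = srcu_dereference(f->data, &f->srcu);
            /* ... use p ... */
            srcu_read_unlock(&f->srcu, idx);
    }

With context analysis enabled, dereferencing f->data outside the
srcu_read_lock()/srcu_read_unlock() pair, or failing to release the
read lock, results in a compile-time warning.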

Signed-off-by: Marco Elver <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
---
v5:
* Fix up annotations for recently added SRCU interfaces.
* Rename "context guard" -> "context lock".
* Use new cleanup.h helpers to properly support scoped lock guards.

v4:
* Rename capability -> context analysis.

v3:
* Switch to DECLARE_LOCK_GUARD_1_ATTRS() (suggested by Peter)
* Support SRCU being reentrant.
---
 Documentation/dev-tools/context-analysis.rst |  2 +-
 include/linux/srcu.h                         | 73 ++++++++++++++------
 include/linux/srcutiny.h                     |  6 ++
 include/linux/srcutree.h                     | 10 ++-
 lib/test_context-analysis.c                  | 25 +++++++
 5 files changed, 91 insertions(+), 25 deletions(-)

diff --git a/Documentation/dev-tools/context-analysis.rst b/Documentation/dev-tools/context-analysis.rst
index 3bc72f71fe25..f7736f1c0767 100644
--- a/Documentation/dev-tools/context-analysis.rst
+++ b/Documentation/dev-tools/context-analysis.rst
@@ -80,7 +80,7 @@ Supported Kernel Primitives
 
 Currently the following synchronization primitives are supported:
 `raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`,
-`bit_spinlock`, RCU.
+`bit_spinlock`, RCU, SRCU (`srcu_struct`).
 
 For context locks with an initialization function (e.g., `spin_lock_init()`),
 calling this function before initializing any guarded members or globals
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 344ad51c8f6c..bb44a0bd7696 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -21,7 +21,7 @@
 #include <linux/workqueue.h>
 #include <linux/rcu_segcblist.h>
 
-struct srcu_struct;
+context_lock_struct(srcu_struct, __reentrant_ctx_lock);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
@@ -77,7 +77,7 @@ int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
 #define SRCU_READ_FLAVOR_SLOWGP                (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
                                                // Flavors requiring synchronize_rcu()
                                                // instead of smp_mb().
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
 
 #ifdef CONFIG_TINY_SRCU
 #include <linux/srcutiny.h>
@@ -131,14 +131,16 @@ static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned
 }
 
 #ifdef CONFIG_NEED_SRCU_NMI_SAFE
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires_shared(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases_shared(ssp);
 #else
 static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        return __srcu_read_lock(ssp);
 }
 static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+       __releases_shared(ssp)
 {
        __srcu_read_unlock(ssp, idx);
 }
@@ -210,6 +212,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/*
+ * No-op helper to denote that ssp must be held. Because SRCU-protected pointers
+ * should still be marked with __rcu_guarded, and we do not want to mark them
+ * with __guarded_by(ssp) as it would complicate annotations for writers, we
+ * choose the following strategy: srcu_dereference_check() calls this helper
+ * that checks that the passed ssp is held, and then fake-acquires 'RCU'.
+ */
+static inline void __srcu_read_lock_must_hold(const struct srcu_struct *ssp) __must_hold_shared(ssp) { }
 
 /**
  * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -223,9 +233,15 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
  * to 1.  The @c argument will normally be a logical expression containing
  * lockdep_is_held() calls.
  */
-#define srcu_dereference_check(p, ssp, c) \
-       __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
-                               (c) || srcu_read_lock_held(ssp), __rcu)
+#define srcu_dereference_check(p, ssp, c)                                      \
+({                                                                             \
+       __srcu_read_lock_must_hold(ssp);                                        \
+       __acquire_shared_ctx_lock(RCU);                                 \
+       __auto_type __v = __rcu_dereference_check((p), __UNIQUE_ID(rcu),        \
+                               (c) || srcu_read_lock_held(ssp), __rcu);        \
+       __release_shared_ctx_lock(RCU);                                 \
+       __v;                                                                    \
+})
 
 /**
  * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -268,7 +284,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
  * invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
  * from another.
  */
-static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        int retval;
 
@@ -304,7 +321,8 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
  * contexts where RCU is watching, that is, from contexts where it would
  * be legal to invoke rcu_read_lock().  Otherwise, lockdep will complain.
  */
-static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        struct srcu_ctr __percpu *retval;
 
@@ -344,7 +362,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *
  * complain.
  */
 static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
-__acquires(ssp)
+       __acquires_shared(ssp)
 {
        struct srcu_ctr __percpu *retval;
 
@@ -360,7 +378,7 @@ __acquires(ssp)
  * See srcu_read_lock_fast() for more information.
  */
 static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
-       __acquires(ssp)
+       __acquires_shared(ssp)
 {
        struct srcu_ctr __percpu *retval;
 
@@ -381,7 +399,7 @@ static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_
  * and srcu_read_lock_fast().  However, the same definition/initialization
  * requirements called out for srcu_read_lock_safe() apply.
  */
-static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires_shared(ssp)
 {
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
@@ -400,7 +418,8 @@ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *
  * then none of the other flavors may be used, whether before, during,
  * or after.
  */
-static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        int retval;
 
@@ -412,7 +431,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
 
 /* Used by tracing, cannot be traced and cannot invoke lockdep. */
 static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
+srcu_read_lock_notrace(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        int retval;
 
@@ -443,7 +463,8 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
  * which calls to down_read() may be nested.  The same srcu_struct may be
  * used concurrently by srcu_down_read() and srcu_read_lock().
  */
-static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+static inline int srcu_down_read(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        WARN_ON_ONCE(in_nmi());
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -458,7 +479,7 @@ static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
  * Exit an SRCU read-side critical section.
  */
 static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
-       __releases(ssp)
+       __releases_shared(ssp)
 {
        WARN_ON_ONCE(idx & ~0x1);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
@@ -474,7 +495,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
  * Exit a light-weight SRCU read-side critical section.
  */
 static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
-       __releases(ssp)
+       __releases_shared(ssp)
 {
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
        srcu_lock_release(&ssp->dep_map);
@@ -490,7 +511,7 @@ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ct
  * Exit an SRCU-fast-updown read-side critical section.
  */
 static inline void
-srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases_shared(ssp)
 {
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
        srcu_lock_release(&ssp->dep_map);
@@ -504,7 +525,7 @@ srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *
  * See srcu_read_unlock_fast() for more information.
  */
 static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
-                                                struct srcu_ctr __percpu *scp) __releases(ssp)
+                                                struct srcu_ctr __percpu *scp) __releases_shared(ssp)
 {
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
        __srcu_read_unlock_fast(ssp, scp);
@@ -519,7 +540,7 @@ static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
  * the same context as the maching srcu_down_read_fast().
  */
 static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
-       __releases(ssp)
+       __releases_shared(ssp)
 {
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
@@ -535,7 +556,7 @@ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __
  * Exit an SRCU read-side critical section, but in an NMI-safe manner.
  */
 static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
-       __releases(ssp)
+       __releases_shared(ssp)
 {
        WARN_ON_ONCE(idx & ~0x1);
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
@@ -545,7 +566,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
 
 /* Used by tracing, cannot be traced and cannot call lockdep. */
 static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases_shared(ssp)
 {
        srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
        __srcu_read_unlock(ssp, idx);
@@ -560,7 +581,7 @@ srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
  * the same context as the maching srcu_down_read().
  */
 static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
-       __releases(ssp)
+       __releases_shared(ssp)
 {
        WARN_ON_ONCE(idx & ~0x1);
        WARN_ON_ONCE(in_nmi());
@@ -600,15 +621,21 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
                    _T->idx = srcu_read_lock(_T->lock),
                    srcu_read_unlock(_T->lock, _T->idx),
                    int idx)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu, _T)
 
 DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
                    _T->scp = srcu_read_lock_fast(_T->lock),
                    srcu_read_unlock_fast(_T->lock, _T->scp),
                    struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast, _T)
 
 DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
                    _T->scp = srcu_read_lock_fast_notrace(_T->lock),
                    srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
                    struct srcu_ctr __percpu *scp)
+DECLARE_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, __acquires_shared(_T), __releases_shared(*(struct srcu_struct **)_T))
+#define class_srcu_fast_notrace_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(srcu_fast_notrace, _T)
 
 #endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index e0698024667a..dec7cbe015aa 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -73,6 +73,7 @@ void synchronize_srcu(struct srcu_struct *ssp);
  * index that must be passed to the matching srcu_read_unlock().
  */
 static inline int __srcu_read_lock(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        int idx;
 
@@ -80,6 +81,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
        idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
        WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
        preempt_enable();
+       __acquire_shared(ssp);
        return idx;
 }
 
@@ -96,22 +98,26 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
 }
 
 static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
 }
 
 static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+       __releases_shared(ssp)
 {
        __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
 }
 
 static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
 }
 
 static inline
 void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+       __releases_shared(ssp)
 {
        __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
 }
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index d6f978b50472..958cb7ef41cb 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -233,7 +233,7 @@ struct srcu_struct {
 #define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
                                        __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
 
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires_shared(ssp);
 void synchronize_srcu_expedited(struct srcu_struct *ssp);
 void srcu_barrier(struct srcu_struct *ssp);
 void srcu_expedite_current(struct srcu_struct *ssp);
@@ -286,6 +286,7 @@ static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ss
  * implementations of this_cpu_inc().
  */
 static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
 
@@ -294,6 +295,7 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
        else
                atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks));  // Y, and implicit RCU reader.
        barrier(); /* Avoid leaking the critical section. */
+       __acquire_shared(ssp);
        return scp;
 }
 
@@ -308,7 +310,9 @@ static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct src
  */
 static inline void notrace
 __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+       __releases_shared(ssp)
 {
+       __release_shared(ssp);
        barrier();  /* Avoid leaking the critical section. */
        if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
                this_cpu_inc(scp->srcu_unlocks.counter);  // Z, and implicit RCU reader.
@@ -326,6 +330,7 @@ __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
  */
 static inline
 struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+       __acquires_shared(ssp)
 {
        struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
 
@@ -334,6 +339,7 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
        else
                atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks));  // Y, and implicit RCU reader.
        barrier(); /* Avoid leaking the critical section. */
+       __acquire_shared(ssp);
        return scp;
 }
 
@@ -348,7 +354,9 @@ struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struc
  */
 static inline void notrace
 __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+       __releases_shared(ssp)
 {
+       __release_shared(ssp);
        barrier();  /* Avoid leaking the critical section. */
        if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
                this_cpu_inc(scp->srcu_unlocks.counter);  // Z, and implicit RCU reader.
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
index 559df32fb5f8..39e03790c0f6 100644
--- a/lib/test_context-analysis.c
+++ b/lib/test_context-analysis.c
@@ -10,6 +10,7 @@
 #include <linux/rcupdate.h>
 #include <linux/seqlock.h>
 #include <linux/spinlock.h>
+#include <linux/srcu.h>
 
 /*
  * Test that helper macros work as expected.
@@ -369,3 +370,27 @@ static void __used test_rcu_assert_variants(void)
        lockdep_assert_in_rcu_read_lock_sched();
        wants_rcu_held_sched();
 }
+
+struct test_srcu_data {
+       struct srcu_struct srcu;
+       long __rcu_guarded *data;
+};
+
+static void __used test_srcu(struct test_srcu_data *d)
+{
+       init_srcu_struct(&d->srcu);
+
+       int idx = srcu_read_lock(&d->srcu);
+       long *data = srcu_dereference(d->data, &d->srcu);
+       (void)data;
+       srcu_read_unlock(&d->srcu, idx);
+
+       rcu_assign_pointer(d->data, NULL);
+}
+
+static void __used test_srcu_guard(struct test_srcu_data *d)
+{
+       { guard(srcu)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
+       { guard(srcu_fast)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
+       { guard(srcu_fast_notrace)(&d->srcu); (void)srcu_dereference(d->data, &d->srcu); }
+}
-- 
2.52.0.322.g1dd061c0dc-goog

