This commit adds assertions verifying the consistency of the rcu_node
structure's ->blkd_tasks list and its ->gp_tasks, ->exp_tasks, and
->boost_tasks pointers.  In particular, the ->blkd_tasks lists must be
empty except for leaf rcu_node structures.

Signed-off-by: Paul E. McKenney <[email protected]>
---
 kernel/rcu/tree.c        |  2 ++
 kernel/rcu/tree_plugin.h | 11 +++++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 913c90eccd4d..c028eb254704 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2395,6 +2395,8 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                        return;
                }
                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
+               WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
+                            rcu_preempt_blocked_readers_cgp(rnp));
                rnp->qsmask &= ~mask;
                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
                                                 mask, rnp->qsmask, rnp->level,
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3e3f92e981a1..eadf8b95b5e9 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -180,6 +180,8 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
        struct task_struct *t = current;
 
        lockdep_assert_held(&rnp->lock);
+       WARN_ON_ONCE(rdp->mynode != rnp);
+       WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 
        /*
         * Decide where to queue the newly blocked task.  In theory,
@@ -261,6 +263,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
                rnp->gp_tasks = &t->rcu_node_entry;
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
+       WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
+                    !(rnp->qsmask & rdp->grpmask));
+       WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
+                    !(rnp->expmask & rdp->grpmask));
        raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
        /*
@@ -482,6 +488,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                rnp = t->rcu_blocked_node;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                WARN_ON_ONCE(rnp != t->rcu_blocked_node);
+               WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = sync_rcu_preempt_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -495,10 +502,10 @@ void rcu_read_unlock_special(struct task_struct *t)
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
                if (IS_ENABLED(CONFIG_RCU_BOOST)) {
-                       if (&t->rcu_node_entry == rnp->boost_tasks)
-                               rnp->boost_tasks = np;
                        /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
                        drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+                       if (&t->rcu_node_entry == rnp->boost_tasks)
+                               rnp->boost_tasks = np;
                }
 
                /*
-- 
2.5.2

Reply via email to