On a 5.2 kernel, lockdep complains when offlining a CPU and writing to a
parallel_cpumask sysfs file.

  echo 0 > /sys/devices/system/cpu/cpu1/online
  echo ff > /sys/kernel/pcrypt/pencrypt/parallel_cpumask

  ======================================================
  WARNING: possible circular locking dependency detected
  5.2.0-padata-base+ #19 Not tainted
  ------------------------------------------------------
  cpuhp/1/13 is trying to acquire lock:
  ...  (&pinst->lock){+.+.}, at: padata_cpu_prep_down+0x37/0x70

  but task is already holding lock:
  ...  (cpuhp_state-down){+.+.}, at: cpuhp_thread_fun+0x34/0x240

  which lock already depends on the new lock.

padata doesn't take cpu_hotplug_lock and pinst->lock in a consistent
order.  Which should be first?  CPU hotplug calls into padata with
cpu_hotplug_lock already held, so it should have priority.

Remove the cpu_hotplug_lock acquisition from __padata_stop and hoist it
up to padata_stop, before pinst->lock is taken.  That fixes a
recursive acquisition of cpu_hotplug_lock in padata_remove_cpu at the
same time:

  padata_remove_cpu
    mutex_lock(&pinst->lock)
    get_online_cpus()
    __padata_remove_cpu
      __padata_stop
        get_online_cpus()

The rest is just switching the order where the two locks are taken
together.

Fixes: 6751fb3c0e0c ("padata: Use get_online_cpus/put_online_cpus")
Fixes: 65ff577e6b6e ("padata: Rearrange set_cpumask functions")
Signed-off-by: Daniel Jordan <daniel.m.jor...@oracle.com>
Cc: Herbert Xu <herb...@gondor.apana.org.au>
Cc: Steffen Klassert <steffen.klass...@secunet.com>
Cc: linux-crypto@vger.kernel.org
Cc: linux-ker...@vger.kernel.org
---

Hello, these two patches are based on all padata fixes now in cryptodev-2.6.

 kernel/padata.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/padata.c b/kernel/padata.c
index b60cc3dcee58..d056276a96ce 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -487,9 +487,7 @@ static void __padata_stop(struct padata_instance *pinst)
 
        synchronize_rcu();
 
-       get_online_cpus();
        padata_flush_queues(pinst->pd);
-       put_online_cpus();
 }
 
 /* Replace the internal control structure with a new one. */
@@ -614,8 +612,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int 
cpumask_type,
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;
 
-       mutex_lock(&pinst->lock);
        get_online_cpus();
+       mutex_lock(&pinst->lock);
 
        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
@@ -633,8 +631,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int 
cpumask_type,
        err =  __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
 
 out:
-       put_online_cpus();
        mutex_unlock(&pinst->lock);
+       put_online_cpus();
 
        return err;
 }
@@ -669,9 +667,11 @@ EXPORT_SYMBOL(padata_start);
  */
 void padata_stop(struct padata_instance *pinst)
 {
+       get_online_cpus();
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
+       put_online_cpus();
 }
 EXPORT_SYMBOL(padata_stop);
 
@@ -739,18 +739,18 @@ int padata_remove_cpu(struct padata_instance *pinst, int 
cpu, int mask)
        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;
 
+       get_online_cpus();
        mutex_lock(&pinst->lock);
 
-       get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
 
        err = __padata_remove_cpu(pinst, cpu);
-       put_online_cpus();
 
        mutex_unlock(&pinst->lock);
+       put_online_cpus();
 
        return err;
 }
-- 
2.22.0

Reply via email to