Kernel code calls cpumask_weight() to compare the weight of a cpumask with
a given number. We can do this more efficiently with the new
cpumask_weight_{eq,gt,ge,lt,le}() helpers, because the conditional weight
calculation may stop traversing the cpumask early, as soon as the
condition is decided.

Signed-off-by: Yury Norov <yury.no...@gmail.com>
---
 arch/ia64/mm/tlb.c                       |  2 +-
 arch/mips/cavium-octeon/octeon-irq.c     |  4 +-
 arch/mips/kernel/crash.c                 |  2 +-
 arch/powerpc/kernel/smp.c                |  2 +-
 arch/powerpc/kernel/watchdog.c           |  2 +-
 arch/powerpc/xmon/xmon.c                 |  4 +-
 arch/s390/kernel/perf_cpum_cf.c          |  2 +-
 arch/x86/kernel/smpboot.c                |  4 +-
 drivers/firmware/psci/psci_checker.c     |  2 +-
 drivers/hv/channel_mgmt.c                |  4 +-
 drivers/infiniband/hw/hfi1/affinity.c    |  9 ++---
 drivers/infiniband/hw/qib/qib_file_ops.c |  2 +-
 drivers/infiniband/hw/qib/qib_iba7322.c  |  2 +-
 drivers/scsi/lpfc/lpfc_init.c            |  2 +-
 drivers/soc/fsl/qbman/qman_test_stash.c  |  2 +-
 include/linux/cpumask.h                  | 50 ++++++++++++++++++++++++
 kernel/sched/core.c                      |  8 ++--
 kernel/sched/topology.c                  |  2 +-
 kernel/time/clockevents.c                |  2 +-
 19 files changed, 78 insertions(+), 29 deletions(-)

diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 135b5135cace..a5bce13ab047 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -332,7 +332,7 @@ __flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 
        preempt_disable();
 #ifdef CONFIG_SMP
-       if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
+       if (mm != current->active_mm || !cpumask_weight_eq(mm_cpumask(mm), 1)) {
                ia64_global_tlb_purge(mm, start, end, nbits);
                preempt_enable();
                return;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 844f882096e6..914871f15fb7 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -763,7 +763,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
        if (!cpumask_test_cpu(cpu, mask))
                return;
 
-       if (cpumask_weight(mask) > 1) {
+       if (cpumask_weight_gt(mask, 1)) {
                /*
                 * It has multi CPU affinity, just remove this CPU
                 * from the affinity set.
@@ -795,7 +795,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
         * This removes the need to do locking in the .ack/.eoi
         * functions.
         */
-       if (cpumask_weight(dest) != 1)
+       if (!cpumask_weight_eq(dest, 1))
                return -EINVAL;
 
        if (!enable_one)
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 81845ba04835..5b690d52491f 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -72,7 +72,7 @@ static void crash_kexec_prepare_cpus(void)
         */
        pr_emerg("Sending IPI to other cpus...\n");
        msecs = 10000;
-       while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
+       while (cpumask_weight_lt(&cpus_in_crash, ncpus) && (--msecs > 0)) {
                cpu_relax();
                mdelay(1);
        }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c338f9d8ab37..00da2064ddf3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1655,7 +1655,7 @@ void start_secondary(void *unused)
                if (has_big_cores)
                        sibling_mask = cpu_smallcore_mask;
 
-               if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
+               if (cpumask_weight_gt(mask, cpumask_weight(sibling_mask(cpu))))
                        shared_caches = true;
        }
 
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index bfc27496fe7e..62937a077de7 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -483,7 +483,7 @@ static void start_watchdog(void *arg)
 
        wd_smp_lock(&flags);
        cpumask_set_cpu(cpu, &wd_cpus_enabled);
-       if (cpumask_weight(&wd_cpus_enabled) == 1) {
+       if (cpumask_weight_eq(&wd_cpus_enabled, 1)) {
                cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
                wd_smp_last_reset_tb = get_tb();
        }
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index f9ae0b398260..b9e9d0b20a7b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -469,7 +469,7 @@ static bool wait_for_other_cpus(int ncpus)
 
        /* We wait for 2s, which is a metric "little while" */
        for (timeout = 20000; timeout != 0; --timeout) {
-               if (cpumask_weight(&cpus_in_xmon) >= ncpus)
+               if (cpumask_weight_ge(&cpus_in_xmon, ncpus))
                        return true;
                udelay(100);
                barrier();
@@ -1338,7 +1338,7 @@ static int cpu_cmd(void)
                        case 'S':
                        case 't':
                                cpumask_copy(&xmon_batch_cpus, &cpus_in_xmon);
-                               if (cpumask_weight(&xmon_batch_cpus) <= 1) {
+                               if (cpumask_weight_le(&xmon_batch_cpus, 1)) {
                                        printf("There are no other cpus in xmon\n");
                                        break;
                                }
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index ee8707abdb6a..4d217f7f5ccf 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -975,7 +975,7 @@ static int cfset_all_start(struct cfset_request *req)
                return -ENOMEM;
        cpumask_and(mask, &req->mask, cpu_online_mask);
        on_each_cpu_mask(mask, cfset_ioctl_on, &p, 1);
-       if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
+       if (!cpumask_weight_eq(mask, atomic_read(&p.cpus_ack))) {
                on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
                rc = -EIO;
                debug_sprintf_event(cf_dbg, 4, "%s CPUs missing", __func__);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 617012f4619f..e851e9945eb5 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1608,7 +1608,7 @@ static void remove_siblinginfo(int cpu)
                /*
                 * last thread sibling in this cpu core going down
                 */
-               if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
+               if (cpumask_weight_eq(topology_sibling_cpumask(cpu), 1))
                        cpu_data(sibling).booted_cores--;
        }
 
@@ -1617,7 +1617,7 @@ static void remove_siblinginfo(int cpu)
 
        for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
-               if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1)
+               if (cpumask_weight_eq(topology_sibling_cpumask(sibling), 1))
                        cpu_data(sibling).smt_active = false;
        }
 
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 116eb465cdb4..90c9473832a9 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -90,7 +90,7 @@ static unsigned int down_and_up_cpus(const struct cpumask *cpus,
                 * cpu_down() checks the number of online CPUs before the TOS
                 * resident CPU.
                 */
-               if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+               if (cpumask_weight_eq(offlined_cpus, nb_available_cpus - 1)) {
                        if (ret != -EBUSY) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down last online CPU %d\n",
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2829575fd9b7..da297220230d 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -762,8 +762,8 @@ static void init_vp_index(struct vmbus_channel *channel)
                }
                alloced_mask = &hv_context.hv_numa_map[numa_node];
 
-               if (cpumask_weight(alloced_mask) ==
-                   cpumask_weight(cpumask_of_node(numa_node))) {
+               if (cpumask_weight_eq(alloced_mask,
+                           cpumask_weight(cpumask_of_node(numa_node)))) {
                        /*
                         * We have cycled through all the CPUs in the node;
                         * reset the alloced map.
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 38eee675369a..7c5ca5c5306a 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -507,7 +507,7 @@ static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
         * available CPUs divide it by the number of devices in the
         * local NUMA node.
         */
-       if (cpumask_weight(&entry->comp_vect_mask) == 1) {
+       if (cpumask_weight_eq(&entry->comp_vect_mask, 1)) {
                possible_cpus_comp_vect = 1;
                dd_dev_warn(dd,
                            "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
@@ -593,7 +593,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
-       int curr_cpu, possible, i, ret;
+       int curr_cpu, i, ret;
        bool new_entry = false;
 
        local_mask = cpumask_of_node(dd->node);
@@ -626,10 +626,9 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
                            local_mask);
 
                /* fill in the receive list */
-               possible = cpumask_weight(&entry->def_intr.mask);
                curr_cpu = cpumask_first(&entry->def_intr.mask);
 
-               if (possible == 1) {
+               if (cpumask_weight_eq(&entry->def_intr.mask, 1)) {
                        /* only one CPU, everyone will use it */
                        cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
                        cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
@@ -1017,7 +1016,7 @@ int hfi1_get_proc_affinity(int node)
                cpu = cpumask_first(proc_mask);
                cpumask_set_cpu(cpu, &set->used);
                goto done;
-       } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
+       } else if (cpumask_weight_gt(&set->mask, current->nr_cpus_allowed)) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index aa290928cf96..add89bc21b0a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1151,7 +1151,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
         * reserve a processor for it on the local NUMA node.
         */
        if ((weight >= qib_cpulist_count) &&
-               (cpumask_weight(local_mask) <= qib_cpulist_count)) {
+               (cpumask_weight_le(local_mask, qib_cpulist_count))) {
                for_each_cpu(local_cpu, local_mask)
                        if (!test_and_set_bit(local_cpu, qib_cpulist)) {
                                fd->rec_cpu_num = local_cpu;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ab98b6a3ae1e..636a080b2952 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3405,7 +3405,7 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
        firstcpu = cpumask_first(local_mask);
        if (firstcpu >= nr_cpu_ids ||
-                       cpumask_weight(local_mask) == num_online_cpus()) {
+                       cpumask_weight_eq(local_mask, num_online_cpus())) {
                local_mask = topology_core_cpumask(0);
                firstcpu = cpumask_first(local_mask);
        }
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a56f01f659f8..325e9004dacd 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -12643,7 +12643,7 @@ lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
                 * gone offline yet, we need >1.
                 */
                cpumask_and(tmp, maskp, cpu_online_mask);
-               if (cpumask_weight(tmp) > 1)
+               if (cpumask_weight_gt(tmp, 1))
                        continue;
 
                /* Now that we have an irq to shutdown, get the eq
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index b7e8e5ec884c..28b08568a349 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -561,7 +561,7 @@ int qman_test_stash(void)
 {
        int err;
 
-       if (cpumask_weight(cpu_online_mask) < 2) {
+       if (cpumask_weight_lt(cpu_online_mask, 2)) {
                pr_info("%s(): skip - only 1 CPU\n", __func__);
                return 0;
        }
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 64dae70d31f5..1906e3225737 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -575,6 +575,56 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
        return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
 }
 
+/**
+ * cpumask_weight_eq - Check if # of bits in *srcp is equal to a given number
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ * @num: the number to check.
+ */
+static inline bool cpumask_weight_eq(const struct cpumask *srcp, unsigned int num)
+{
+       return bitmap_weight_eq(cpumask_bits(srcp), nr_cpumask_bits, num);
+}
+
+/**
+ * cpumask_weight_gt - Check if # of bits in *srcp is greater than a given number
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ * @num: the number to check.
+ */
+static inline bool cpumask_weight_gt(const struct cpumask *srcp, unsigned int num)
+{
+       return bitmap_weight_gt(cpumask_bits(srcp), nr_cpumask_bits, num);
+}
+
+/**
+ * cpumask_weight_ge - Check if # of bits in *srcp is greater than or equal to a given number
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ * @num: the number to check.
+ */
+static inline bool cpumask_weight_ge(const struct cpumask *srcp, unsigned int num)
+{
+       return bitmap_weight_ge(cpumask_bits(srcp), nr_cpumask_bits, num);
+}
+
+/**
+ * cpumask_weight_lt - Check if # of bits in *srcp is less than a given number
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ * @num: the number to check.
+ */
+static inline bool cpumask_weight_lt(const struct cpumask *srcp, unsigned int num)
+{
+       return bitmap_weight_lt(cpumask_bits(srcp), nr_cpumask_bits, num);
+}
+
+/**
+ * cpumask_weight_le - Check if # of bits in *srcp is less than or equal to a given number
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ * @num: the number to check.
+ */
+static inline bool cpumask_weight_le(const struct cpumask *srcp, unsigned int num)
+{
+       return bitmap_weight_le(cpumask_bits(srcp), nr_cpumask_bits, num);
+}
+
 /**
  * cpumask_shift_right - *dstp = *srcp >> n
  * @dstp: the cpumask result
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9b3ec14227e1..60f7d04a05f8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6006,7 +6006,7 @@ static void sched_core_cpu_starting(unsigned int cpu)
        WARN_ON_ONCE(rq->core != rq);
 
        /* if we're the first, we'll be our own leader */
-       if (cpumask_weight(smt_mask) == 1)
+       if (cpumask_weight_eq(smt_mask, 1))
                goto unlock;
 
        /* find the leader */
@@ -6047,7 +6047,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
        sched_core_lock(cpu, &flags);
 
        /* if we're the last man standing, nothing to do */
-       if (cpumask_weight(smt_mask) == 1) {
+       if (cpumask_weight_eq(smt_mask, 1)) {
                WARN_ON_ONCE(rq->core != rq);
                goto unlock;
        }
@@ -9053,7 +9053,7 @@ int sched_cpu_activate(unsigned int cpu)
        /*
         * When going up, increment the number of cores with SMT present.
         */
-       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+       if (cpumask_weight_eq(cpu_smt_mask(cpu), 2))
                static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
        set_cpu_active(cpu, true);
@@ -9128,7 +9128,7 @@ int sched_cpu_deactivate(unsigned int cpu)
        /*
         * When going down, decrement the number of cores with SMT present.
         */
-       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+       if (cpumask_weight_eq(cpu_smt_mask(cpu), 2))
                static_branch_dec_cpuslocked(&sched_smt_present);
 
        sched_core_cpu_deactivate(cpu);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8478e2a8cd65..79395571599f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -169,7 +169,7 @@ static const unsigned int SD_DEGENERATE_GROUPS_MASK =
 
 static int sd_degenerate(struct sched_domain *sd)
 {
-       if (cpumask_weight(sched_domain_span(sd)) == 1)
+       if (cpumask_weight_eq(sched_domain_span(sd), 1))
                return 1;
 
        /* Following flags need at least 2 groups */
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 003ccf338d20..32d6629a55b2 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -648,7 +648,7 @@ void tick_cleanup_dead_cpu(int cpu)
         */
        list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                if (cpumask_test_cpu(cpu, dev->cpumask) &&
-                   cpumask_weight(dev->cpumask) == 1 &&
+                   cpumask_weight_eq(dev->cpumask, 1) &&
                    !tick_is_broadcast_device(dev)) {
                        BUG_ON(!clockevent_state_detached(dev));
                        list_del(&dev->list);
-- 
2.30.2


_______________________________________________
linux-snps-arc mailing list
linux-snps-arc@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-snps-arc

Reply via email to