commit:     247c1a7419bf47efcd7780c0a1e2bc567a29391e
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 15 17:49:52 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Oct 15 17:49:52 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=247c1a74

Update BMQ and PDS io scheduler patch to v6.17-r1

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README                                        |   2 +-
 ...=> 5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch | 272 ++++++++++++---------
 2 files changed, 155 insertions(+), 119 deletions(-)

diff --git a/0000_README b/0000_README
index 0aa228a9..7857b783 100644
--- a/0000_README
+++ b/0000_README
@@ -103,7 +103,7 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   More ISA levels and uarches for kernel 6.16+
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
 From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
 

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
index 6b5e3269..9e1cd866 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
@@ -723,10 +723,10 @@ index 8ae86371ddcd..a972ef1e31a7 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..8f03f5312e4d
+index 000000000000..db9a57681f70
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7648 @@
+@@ -0,0 +1,7645 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -801,7 +801,7 @@ index 000000000000..8f03f5312e4d
 +__read_mostly int sysctl_resched_latency_warn_ms = 100;
 +__read_mostly int sysctl_resched_latency_warn_once = 1;
 +
-+#define ALT_SCHED_VERSION "v6.17-r0"
++#define ALT_SCHED_VERSION "v6.17-r1"
 +
 +#define STOP_PRIO             (MAX_RT_PRIO - 1)
 +
@@ -842,7 +842,7 @@ index 000000000000..8f03f5312e4d
 + * the domain), this allows us to quickly tell if two cpus are in the same cache
 + * domain, see cpus_share_cache().
 + */
-+DEFINE_PER_CPU(int, sd_llc_id);
++static DEFINE_PER_CPU_READ_MOSTLY(int, sd_llc_id);
 +
 +DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 +
@@ -919,7 +919,7 @@ index 000000000000..8f03f5312e4d
 +
 +      if (prio < last_prio) {
 +              if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+                      rq->clear_idle_mask_func(cpu, sched_idle_mask);
++                      sched_clear_idle_mask(cpu);
 +                      last_prio -= 2;
 +              }
 +              CLEAR_CACHED_PREEMPT_MASK(pr, prio, last_prio, cpu);
@@ -928,7 +928,7 @@ index 000000000000..8f03f5312e4d
 +      }
 +      /* last_prio < prio */
 +      if (IDLE_TASK_SCHED_PRIO == prio) {
-+              rq->set_idle_mask_func(cpu, sched_idle_mask);
++              sched_set_idle_mask(cpu);
 +              prio -= 2;
 +      }
 +      SET_CACHED_PREEMPT_MASK(pr, last_prio, prio, cpu);
@@ -2741,7 +2741,7 @@ index 000000000000..8f03f5312e4d
 +      return cpumask_and(preempt_mask, allow_mask, mask);
 +}
 +
-+__read_mostly idle_select_func_t idle_select_func ____cacheline_aligned_in_smp = cpumask_and;
++DEFINE_STATIC_CALL(sched_idle_select_func, cpumask_and);
 +
 +static inline int select_task_rq(struct task_struct *p)
 +{
@@ -2750,7 +2750,7 @@ index 000000000000..8f03f5312e4d
 +      if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
 +              return select_fallback_rq(task_cpu(p), p);
 +
-+      if (idle_select_func(&mask, &allow_mask, sched_idle_mask)       ||
++      if (static_call(sched_idle_select_func)(&mask, &allow_mask, sched_idle_mask)    ||
 +          preempt_mask_check(&mask, &allow_mask, task_sched_prio(p)))
 +              return best_mask_cpu(task_cpu(p), &mask);
 +
@@ -5281,8 +5281,7 @@ index 000000000000..8f03f5312e4d
 +
 +      if (next == rq->idle) {
 +              if (!take_other_rq_tasks(rq, cpu)) {
-+                      if (likely(rq->balance_func && rq->online))
-+                              rq->balance_func(rq, cpu);
++                      sched_cpu_topology_balance(cpu, rq);
 +
 +                      schedstat_inc(rq->sched_goidle);
 +                      /*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
@@ -7145,8 +7144,6 @@ index 000000000000..8f03f5312e4d
 +              rq->online = false;
 +              rq->cpu = i;
 +
-+              rq->clear_idle_mask_func = cpumask_clear_cpu;
-+              rq->set_idle_mask_func = cpumask_set_cpu;
 +              rq->balance_func = NULL;
 +              rq->active_balance_arg.active = 0;
 +
@@ -8377,10 +8374,10 @@ index 000000000000..8f03f5312e4d
 +#endif /* CONFIG_SCHED_MM_CID */
 diff --git a/kernel/sched/alt_core.h b/kernel/sched/alt_core.h
 new file mode 100644
-index 000000000000..bb9512c76566
+index 000000000000..55497941a22b
 --- /dev/null
 +++ b/kernel/sched/alt_core.h
-@@ -0,0 +1,177 @@
+@@ -0,0 +1,174 @@
 +#ifndef _KERNEL_SCHED_ALT_CORE_H
 +#define _KERNEL_SCHED_ALT_CORE_H
 +
@@ -8548,10 +8545,7 @@ index 000000000000..bb9512c76566
 +
 +extern struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu);
 +
-+typedef bool (*idle_select_func_t)(struct cpumask *dstp, const struct cpumask *src1p,
-+                                 const struct cpumask *src2p);
-+
-+extern idle_select_func_t idle_select_func;
++DECLARE_STATIC_CALL(sched_idle_select_func, cpumask_and);
 +
 +/* balance callback */
 +extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
@@ -8598,10 +8592,10 @@ index 000000000000..1dbd7eb6a434
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..5b9a53c669f5
+index 000000000000..6cd5cfe3a332
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,1018 @@
+@@ -0,0 +1,1013 @@
 +#ifndef _KERNEL_SCHED_ALT_SCHED_H
 +#define _KERNEL_SCHED_ALT_SCHED_H
 +
@@ -8724,8 +8718,6 @@ index 000000000000..5b9a53c669f5
 +};
 +
 +typedef void (*balance_func_t)(struct rq *rq, int cpu);
-+typedef void (*set_idle_mask_func_t)(unsigned int cpu, struct cpumask *dstp);
-+typedef void (*clear_idle_mask_func_t)(int cpu, struct cpumask *dstp);
 +
 +struct balance_arg {
 +      struct task_struct      *task;
@@ -8766,9 +8758,6 @@ index 000000000000..5b9a53c669f5
 +      int membarrier_state;
 +#endif
 +
-+      set_idle_mask_func_t    set_idle_mask_func;
-+      clear_idle_mask_func_t  clear_idle_mask_func;
-+
 +      int cpu;                /* cpu of this runqueue */
 +      bool online;
 +
@@ -9622,10 +9611,10 @@ index 000000000000..5b9a53c669f5
 +#endif /* _KERNEL_SCHED_ALT_SCHED_H */
 diff --git a/kernel/sched/alt_topology.c b/kernel/sched/alt_topology.c
 new file mode 100644
-index 000000000000..376a08a5afda
+index 000000000000..590ee3cb1b49
 --- /dev/null
 +++ b/kernel/sched/alt_topology.c
-@@ -0,0 +1,347 @@
+@@ -0,0 +1,287 @@
 +#include "alt_core.h"
 +#include "alt_topology.h"
 +
@@ -9640,47 +9629,9 @@ index 000000000000..376a08a5afda
 +}
 +__setup("pcore_cpus=", sched_pcore_mask_setup);
 +
-+/*
-+ * set/clear idle mask functions
-+ */
-+#ifdef CONFIG_SCHED_SMT
-+static void set_idle_mask_smt(unsigned int cpu, struct cpumask *dstp)
-+{
-+      cpumask_set_cpu(cpu, dstp);
-+      if (cpumask_subset(cpu_smt_mask(cpu), sched_idle_mask))
-+              cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
-+}
-+
-+static void clear_idle_mask_smt(int cpu, struct cpumask *dstp)
-+{
-+      cpumask_clear_cpu(cpu, dstp);
-+      cpumask_andnot(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
-+}
-+#endif
-+
-+static void set_idle_mask_pcore(unsigned int cpu, struct cpumask *dstp)
-+{
-+      cpumask_set_cpu(cpu, dstp);
-+      cpumask_set_cpu(cpu, sched_pcore_idle_mask);
-+}
-+
-+static void clear_idle_mask_pcore(int cpu, struct cpumask *dstp)
-+{
-+      cpumask_clear_cpu(cpu, dstp);
-+      cpumask_clear_cpu(cpu, sched_pcore_idle_mask);
-+}
-+
-+static void set_idle_mask_ecore(unsigned int cpu, struct cpumask *dstp)
-+{
-+      cpumask_set_cpu(cpu, dstp);
-+      cpumask_set_cpu(cpu, sched_ecore_idle_mask);
-+}
-+
-+static void clear_idle_mask_ecore(int cpu, struct cpumask *dstp)
-+{
-+      cpumask_clear_cpu(cpu, dstp);
-+      cpumask_clear_cpu(cpu, sched_ecore_idle_mask);
-+}
++DEFINE_PER_CPU_READ_MOSTLY(enum cpu_topo_type, sched_cpu_topo);
++DEFINE_PER_CPU_READ_MOSTLY(enum cpu_topo_balance_type, sched_cpu_topo_balance);
++DEFINE_PER_CPU(struct balance_callback, active_balance_head);
 +
 +/*
 + * Idle cpu/rq selection functions
@@ -9785,8 +9736,6 @@ index 000000000000..376a08a5afda
 +      return 0;
 +}
 +
-+static DEFINE_PER_CPU(struct balance_callback, active_balance_head);
-+
 +#ifdef CONFIG_SCHED_SMT
 +static inline int
 +smt_pcore_source_balance(struct rq *rq, cpumask_t *single_task_mask, cpumask_t *target_mask)
@@ -9807,7 +9756,7 @@ index 000000000000..376a08a5afda
 +}
 +
 +/* smt p core balance functions */
-+static inline void smt_pcore_balance(struct rq *rq)
++void smt_pcore_balance(struct rq *rq)
 +{
 +      cpumask_t single_task_mask;
 +
@@ -9822,14 +9771,8 @@ index 000000000000..376a08a5afda
 +              return;
 +}
 +
-+static void smt_pcore_balance_func(struct rq *rq, const int cpu)
-+{
-+      if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
-+              queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_pcore_balance);
-+}
-+
 +/* smt balance functions */
-+static inline void smt_balance(struct rq *rq)
++void smt_balance(struct rq *rq)
 +{
 +      cpumask_t single_task_mask;
 +
@@ -9840,32 +9783,22 @@ index 000000000000..376a08a5afda
 +              return;
 +}
 +
-+static void smt_balance_func(struct rq *rq, const int cpu)
-+{
-+      if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
-+              queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_balance);
-+}
-+
 +/* e core balance functions */
-+static inline void ecore_balance(struct rq *rq)
++void ecore_balance(struct rq *rq)
 +{
 +      cpumask_t single_task_mask;
 +
 +      if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
 +          cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
++          cpumask_empty(sched_pcore_idle_mask) &&
 +          /* smt occupied p core to idle e core balance */
 +          smt_pcore_source_balance(rq, &single_task_mask, sched_ecore_idle_mask))
 +              return;
 +}
-+
-+static void ecore_balance_func(struct rq *rq, const int cpu)
-+{
-+      queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), ecore_balance);
-+}
 +#endif /* CONFIG_SCHED_SMT */
 +
 +/* p core balance functions */
-+static inline void pcore_balance(struct rq *rq)
++void pcore_balance(struct rq *rq)
 +{
 +      cpumask_t single_task_mask;
 +
@@ -9876,34 +9809,28 @@ index 000000000000..376a08a5afda
 +              return;
 +}
 +
-+static void pcore_balance_func(struct rq *rq, const int cpu)
-+{
-+      queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), pcore_balance);
-+}
-+
 +#ifdef ALT_SCHED_DEBUG
 +#define SCHED_DEBUG_INFO(...) printk(KERN_INFO __VA_ARGS__)
 +#else
 +#define SCHED_DEBUG_INFO(...) do { } while(0)
 +#endif
 +
-+#define SET_IDLE_SELECT_FUNC(func)                                            \
++#define IDLE_SELECT_FUNC_UPDATE(func)                                         \
 +{                                                                             \
-+      idle_select_func = func;                                                \
-+      printk(KERN_INFO "sched: "#func);                                       \
++      static_call_update(sched_idle_select_func, &func);                      \
++      printk(KERN_INFO "sched: idle select func -> "#func);                   \
 +}
 +
-+#define SET_RQ_BALANCE_FUNC(rq, cpu, func)                                    \
++#define SET_SCHED_CPU_TOPOLOGY(cpu, topo)                                     \
 +{                                                                             \
-+      rq->balance_func = func;                                                \
-+      SCHED_DEBUG_INFO("sched: cpu#%02d -> "#func, cpu);                      \
++      per_cpu(sched_cpu_topo, (cpu)) = topo;                                  \
++      SCHED_DEBUG_INFO("sched: cpu#%02d -> "#topo, cpu);                      \
 +}
 +
-+#define SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_func, clear_func)                  \
++#define SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, balance)                          \
 +{                                                                             \
-+      rq->set_idle_mask_func          = set_func;                             \
-+      rq->clear_idle_mask_func        = clear_func;                           \
-+      SCHED_DEBUG_INFO("sched: cpu#%02d -> "#set_func" "#clear_func, cpu);    \
++      per_cpu(sched_cpu_topo_balance, (cpu)) = balance;                       \
++      SCHED_DEBUG_INFO("sched: cpu#%02d -> "#balance, cpu);                   \
 +}
 +
 +void sched_init_topology(void)
@@ -9926,16 +9853,17 @@ index 000000000000..376a08a5afda
 +              ecore_present = !cpumask_empty(&sched_ecore_mask);
 +      }
 +
-+#ifdef CONFIG_SCHED_SMT
 +      /* idle select function */
++#ifdef CONFIG_SCHED_SMT
 +      if (cpumask_equal(&sched_smt_mask, cpu_online_mask)) {
-+              SET_IDLE_SELECT_FUNC(p1_idle_select_func);
++              IDLE_SELECT_FUNC_UPDATE(p1_idle_select_func);
 +      } else
 +#endif
 +      if (!cpumask_empty(&sched_pcore_mask)) {
-+              SET_IDLE_SELECT_FUNC(p1p2_idle_select_func);
++              IDLE_SELECT_FUNC_UPDATE(p1p2_idle_select_func);
 +      }
 +
++      /* CPU topology setup */
 +      for_each_online_cpu(cpu) {
 +              rq = cpu_rq(cpu);
 +              /* take chance to reset time slice for idle tasks */
@@ -9943,13 +9871,13 @@ index 000000000000..376a08a5afda
 +
 +#ifdef CONFIG_SCHED_SMT
 +              if (cpumask_weight(cpu_smt_mask(cpu)) > 1) {
-+                      SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_smt, clear_idle_mask_smt);
++                      SET_SCHED_CPU_TOPOLOGY(cpu, CPU_TOPOLOGY_SMT);
 +
 +                      if (cpumask_test_cpu(cpu, &sched_pcore_mask) &&
 +                          !cpumask_intersects(&sched_ecore_mask, &sched_smt_mask)) {
-+                              SET_RQ_BALANCE_FUNC(rq, cpu, smt_pcore_balance_func);
++                              SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_SMT_PCORE);
 +                      } else {
-+                              SET_RQ_BALANCE_FUNC(rq, cpu, smt_balance_func);
++                              SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_SMT);
 +                      }
 +
 +                      continue;
@@ -9957,31 +9885,139 @@ index 000000000000..376a08a5afda
 +#endif
 +              /* !SMT or only one cpu in sg */
 +              if (cpumask_test_cpu(cpu, &sched_pcore_mask)) {
-+                      SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_pcore, clear_idle_mask_pcore);
++                      SET_SCHED_CPU_TOPOLOGY(cpu, CPU_TOPOLOGY_PCORE);
 +
 +                      if (ecore_present)
-+                              SET_RQ_BALANCE_FUNC(rq, cpu, pcore_balance_func);
++                              SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_PCORE);
 +
 +                      continue;
 +              }
++
 +              if (cpumask_test_cpu(cpu, &sched_ecore_mask)) {
-+                      SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_ecore, clear_idle_mask_ecore);
++                      SET_SCHED_CPU_TOPOLOGY(cpu, CPU_TOPOLOGY_ECORE);
 +#ifdef CONFIG_SCHED_SMT
 +                      if (cpumask_intersects(&sched_pcore_mask, &sched_smt_mask))
-+                              SET_RQ_BALANCE_FUNC(rq, cpu, ecore_balance_func);
++                              SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_ECORE);
 +#endif
 +              }
 +      }
 +}
 diff --git a/kernel/sched/alt_topology.h b/kernel/sched/alt_topology.h
 new file mode 100644
-index 000000000000..076174cd2bc6
+index 000000000000..14591a303ea5
 --- /dev/null
 +++ b/kernel/sched/alt_topology.h
-@@ -0,0 +1,6 @@
+@@ -0,0 +1,113 @@
 +#ifndef _KERNEL_SCHED_ALT_TOPOLOGY_H
 +#define _KERNEL_SCHED_ALT_TOPOLOGY_H
 +
++/*
++ * CPU topology type
++ */
++enum cpu_topo_type {
++      CPU_TOPOLOGY_DEFAULT = 0,
++      CPU_TOPOLOGY_PCORE,
++      CPU_TOPOLOGY_ECORE,
++#ifdef CONFIG_SCHED_SMT
++      CPU_TOPOLOGY_SMT,
++#endif
++};
++
++DECLARE_PER_CPU_READ_MOSTLY(enum cpu_topo_type, sched_cpu_topo);
++
++static inline void sched_set_idle_mask(const unsigned int cpu)
++{
++      cpumask_set_cpu(cpu, sched_idle_mask);
++
++      switch (per_cpu(sched_cpu_topo, cpu)) {
++      case CPU_TOPOLOGY_DEFAULT:
++              break;
++      case CPU_TOPOLOGY_PCORE:
++              cpumask_set_cpu(cpu, sched_pcore_idle_mask);
++              break;
++      case CPU_TOPOLOGY_ECORE:
++              cpumask_set_cpu(cpu, sched_ecore_idle_mask);
++              break;
++#ifdef CONFIG_SCHED_SMT
++      case CPU_TOPOLOGY_SMT:
++              if (cpumask_subset(cpu_smt_mask(cpu), sched_idle_mask))
++                      cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++              break;
++#endif
++      }
++}
++
++static inline void sched_clear_idle_mask(const unsigned int cpu)
++{
++      cpumask_clear_cpu(cpu, sched_idle_mask);
++
++      switch (per_cpu(sched_cpu_topo, cpu)) {
++      case CPU_TOPOLOGY_DEFAULT:
++              break;
++      case CPU_TOPOLOGY_PCORE:
++              cpumask_clear_cpu(cpu, sched_pcore_idle_mask);
++              break;
++      case CPU_TOPOLOGY_ECORE:
++              cpumask_clear_cpu(cpu, sched_ecore_idle_mask);
++              break;
++#ifdef CONFIG_SCHED_SMT
++      case CPU_TOPOLOGY_SMT:
++              cpumask_andnot(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++              break;
++#endif
++      }
++}
++
++/*
++ * CPU topology balance type
++ */
++enum cpu_topo_balance_type {
++      CPU_TOPOLOGY_BALANCE_NONE = 0,
++      CPU_TOPOLOGY_BALANCE_PCORE,
++#ifdef CONFIG_SCHED_SMT
++      CPU_TOPOLOGY_BALANCE_ECORE,
++      CPU_TOPOLOGY_BALANCE_SMT,
++      CPU_TOPOLOGY_BALANCE_SMT_PCORE,
++#endif
++};
++
++DECLARE_PER_CPU_READ_MOSTLY(enum cpu_topo_balance_type, sched_cpu_topo_balance);
++DECLARE_PER_CPU(struct balance_callback, active_balance_head);
++
++extern void pcore_balance(struct rq *rq);
++#ifdef CONFIG_SCHED_SMT
++extern void ecore_balance(struct rq *rq);
++extern void smt_balance(struct rq *rq);
++extern void smt_pcore_balance(struct rq *rq);
++#endif
++
++static inline void sched_cpu_topology_balance(const unsigned int cpu, struct rq *rq)
++{
++      if (!rq->online)
++              return;
++
++      switch (per_cpu(sched_cpu_topo_balance, cpu)) {
++      case CPU_TOPOLOGY_BALANCE_NONE:
++              break;
++      case CPU_TOPOLOGY_BALANCE_PCORE:
++              queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), pcore_balance);
++              break;
++#ifdef CONFIG_SCHED_SMT
++      case CPU_TOPOLOGY_BALANCE_ECORE:
++              queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), ecore_balance);
++              break;
++      case CPU_TOPOLOGY_BALANCE_SMT:
++              if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
++                      queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_balance);
++              break;
++      case CPU_TOPOLOGY_BALANCE_SMT_PCORE:
++              if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
++                      queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_pcore_balance);
++              break;
++#endif
++      }
++}
++
 +extern void sched_init_topology(void);
 +
 +#endif /* _KERNEL_SCHED_ALT_TOPOLOGY_H */
