commit:     bbd5b42d3ff847b17f85f7ce29fa19f28f88b798
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jan 13 17:18:55 2025 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jan 13 17:18:55 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bbd5b42d

BMQ(BitMap Queue) Scheduler r1 version bump

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   2 +-
 ...=> 5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch | 252 +++++++++++++++------
 2 files changed, 184 insertions(+), 70 deletions(-)

diff --git a/0000_README b/0000_README
index 29d9187b..06b9cb3f 100644
--- a/0000_README
+++ b/0000_README
@@ -127,7 +127,7 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
 From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
 

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
index 9eb3139f..532813fd 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.12-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.12-r1.patch
@@ -158,7 +158,7 @@ index 8874f681b056..59eb72bf7d5f 100644
        [RLIMIT_RTTIME]         = {  RLIM_INFINITY,  RLIM_INFINITY },   \
  }
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index bb343136ddd0..212d9204e9aa 100644
+index bb343136ddd0..6adfea989b7b 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -804,9 +804,13 @@ struct task_struct {
@@ -212,7 +212,34 @@ index bb343136ddd0..212d9204e9aa 100644
  
  #ifdef CONFIG_CGROUP_SCHED
        struct task_group               *sched_task_group;
-@@ -1609,6 +1628,15 @@ struct task_struct {
+@@ -878,11 +897,15 @@ struct task_struct {
+       const cpumask_t                 *cpus_ptr;
+       cpumask_t                       *user_cpus_ptr;
+       cpumask_t                       cpus_mask;
++#ifndef CONFIG_SCHED_ALT
+       void                            *migration_pending;
++#endif
+ #ifdef CONFIG_SMP
+       unsigned short                  migration_disabled;
+ #endif
++#ifndef CONFIG_SCHED_ALT
+       unsigned short                  migration_flags;
++#endif
+ 
+ #ifdef CONFIG_PREEMPT_RCU
+       int                             rcu_read_lock_nesting;
+@@ -914,8 +937,10 @@ struct task_struct {
+ 
+       struct list_head                tasks;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+       struct plist_node               pushable_tasks;
+       struct rb_node                  pushable_dl_tasks;
++#endif
+ #endif
+ 
+       struct mm_struct                *mm;
+@@ -1609,6 +1634,15 @@ struct task_struct {
         */
  };
  
@@ -228,7 +255,7 @@ index bb343136ddd0..212d9204e9aa 100644
  #define TASK_REPORT_IDLE      (TASK_REPORT + 1)
  #define TASK_REPORT_MAX               (TASK_REPORT_IDLE << 1)
  
-@@ -2135,7 +2163,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+@@ -2135,7 +2169,11 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
  
  static inline bool task_is_runnable(struct task_struct *p)
  {
@@ -341,7 +368,7 @@ index 4237daa5ac7a..3cebd93c49c8 100644
  #else
  static inline void rebuild_sched_domains_energy(void)
 diff --git a/init/Kconfig b/init/Kconfig
-index c521e1421ad4..131a599fcde2 100644
+index c521e1421ad4..4a397b48a453 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -652,6 +652,7 @@ config TASK_IO_ACCOUNTING
@@ -352,15 +379,7 @@ index c521e1421ad4..131a599fcde2 100644
        select KERNFS
        help
          Collect metrics that indicate how overcommitted the CPU, memory,
-@@ -817,6 +818,7 @@ menu "Scheduler features"
- config UCLAMP_TASK
-       bool "Enable utilization clamping for RT/FAIR tasks"
-       depends on CPU_FREQ_GOV_SCHEDUTIL
-+      depends on !SCHED_ALT
-       help
-         This feature enables the scheduler to track the clamped utilization
-         of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -863,6 +865,35 @@ config UCLAMP_BUCKETS_COUNT
+@@ -863,6 +864,35 @@ config UCLAMP_BUCKETS_COUNT
  
          If in doubt, use the default value.
  
@@ -396,7 +415,7 @@ index c521e1421ad4..131a599fcde2 100644
  endmenu
  
  #
-@@ -928,6 +959,7 @@ config NUMA_BALANCING
+@@ -928,6 +958,7 @@ config NUMA_BALANCING
        depends on ARCH_SUPPORTS_NUMA_BALANCING
        depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
        depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
@@ -404,23 +423,7 @@ index c521e1421ad4..131a599fcde2 100644
        help
          This option adds support for automatic NUMA aware memory/task placement.
          The mechanism is quite primitive and is based on migrating memory when
-@@ -1036,6 +1068,7 @@ menuconfig CGROUP_SCHED
-         tasks.
- 
- if CGROUP_SCHED
-+if !SCHED_ALT
- config GROUP_SCHED_WEIGHT
-       def_bool n
- 
-@@ -1073,6 +1106,7 @@ config EXT_GROUP_SCHED
-       select GROUP_SCHED_WEIGHT
-       default y
- 
-+endif #!SCHED_ALT
- endif #CGROUP_SCHED
- 
- config SCHED_MM_CID
-@@ -1334,6 +1368,7 @@ config CHECKPOINT_RESTORE
+@@ -1334,6 +1365,7 @@ config CHECKPOINT_RESTORE
  
  config SCHED_AUTOGROUP
        bool "Automatic process group scheduling"
@@ -429,7 +432,7 @@ index c521e1421ad4..131a599fcde2 100644
        select CGROUP_SCHED
        select FAIR_GROUP_SCHED
 diff --git a/init/init_task.c b/init/init_task.c
-index 136a8231355a..03770079619a 100644
+index 136a8231355a..12c01ab8e718 100644
 --- a/init/init_task.c
 +++ b/init/init_task.c
 @@ -71,9 +71,16 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
@@ -466,14 +469,20 @@ index 136a8231355a..03770079619a 100644
        .se             = {
                .group_node     = LIST_HEAD_INIT(init_task.se.group_node),
        },
-@@ -93,6 +110,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
+@@ -93,10 +110,13 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
                .run_list       = LIST_HEAD_INIT(init_task.rt.run_list),
                .time_slice     = RR_TIMESLICE,
        },
 +#endif
        .tasks          = LIST_HEAD_INIT(init_task.tasks),
++#ifndef CONFIG_SCHED_ALT
  #ifdef CONFIG_SMP
        .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+ #endif
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+       .sched_task_group = &root_task_group,
+ #endif
 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
 index fe782cd77388..d27d2154d71a 100644
 --- a/kernel/Kconfig.preempt
@@ -700,10 +709,10 @@ index 976092b7bd45..31d587c16ec1 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..c59691742340
+index 000000000000..0a08bc0176ac
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7458 @@
+@@ -0,0 +1,7515 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -782,7 +791,7 @@ index 000000000000..c59691742340
 +#define sched_feat(x) (0)
 +#endif /* CONFIG_SCHED_DEBUG */
 +
-+#define ALT_SCHED_VERSION "v6.12-r0"
++#define ALT_SCHED_VERSION "v6.12-r1"
 +
 +#define STOP_PRIO             (MAX_RT_PRIO - 1)
 +
@@ -2144,8 +2153,6 @@ index 000000000000..c59691742340
 +      __set_task_cpu(p, new_cpu);
 +}
 +
-+#define MDF_FORCE_ENABLED     0x80
-+
 +static void
 +__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
 +{
@@ -2186,8 +2193,6 @@ index 000000000000..c59691742340
 +      if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
 +              cpu_rq(cpu)->nr_pinned++;
 +              p->migration_disabled = 1;
-+              p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
 +              /*
 +               * Violates locking rules! see comment in __do_set_cpus_ptr().
 +               */
@@ -2237,6 +2242,15 @@ index 000000000000..c59691742340
 +}
 +EXPORT_SYMBOL_GPL(migrate_enable);
 +
++static void __migrate_force_enable(struct task_struct *p, struct rq *rq)
++{
++      if (likely(p->cpus_ptr != &p->cpus_mask))
++              __do_set_cpus_ptr(p, &p->cpus_mask);
++      p->migration_disabled = 0;
++      /* When p is migrate_disabled, rq->lock should be held */
++      rq->nr_pinned--;
++}
++
 +static inline bool rq_has_pinned_tasks(struct rq *rq)
 +{
 +      return rq->nr_pinned;
@@ -2417,6 +2431,9 @@ index 000000000000..c59691742340
 +
 +      __do_set_cpus_allowed(p, &ac);
 +
++      if (is_migration_disabled(p) && !cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
++              __migrate_force_enable(p, task_rq(p));
++
 +      /*
 +       * Because this is called with p->pi_lock held, it is not possible
 +       * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
@@ -2712,14 +2729,8 @@ index 000000000000..c59691742340
 +{
 +      /* Can the task run on the task's current CPU? If so, we're done */
 +      if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
-+              if (p->migration_disabled) {
-+                      if (likely(p->cpus_ptr != &p->cpus_mask))
-+                              __do_set_cpus_ptr(p, &p->cpus_mask);
-+                      p->migration_disabled = 0;
-+                      p->migration_flags |= MDF_FORCE_ENABLED;
-+                      /* When p is migrate_disabled, rq->lock should be held */
-+                      rq->nr_pinned--;
-+              }
++              if (is_migration_disabled(p))
++                      __migrate_force_enable(p, rq);
 +
 +              if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
 +                      struct migration_arg arg = { p, dest_cpu };
@@ -7178,9 +7189,6 @@ index 000000000000..c59691742340
 +      if (preempt_count() > 0)
 +              return;
 +
-+      if (current->migration_flags & MDF_FORCE_ENABLED)
-+              return;
-+
 +      if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 +              return;
 +      prev_jiffy = jiffies;
@@ -7374,6 +7382,43 @@ index 000000000000..c59691742340
 +{
 +}
 +
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
++static int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++      return 0;
++}
++
++static int sched_group_set_idle(struct task_group *tg, long idle)
++{
++      return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++                              struct cftype *cftype, u64 shareval)
++{
++      return sched_group_set_shares(css_tg(css), shareval);
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++                             struct cftype *cft)
++{
++      return 0;
++}
++
++static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
++                             struct cftype *cft)
++{
++      return 0;
++}
++
++static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
++                              struct cftype *cft, s64 idle)
++{
++      return sched_group_set_idle(css_tg(css), idle);
++}
++#endif
++
++#ifdef CONFIG_CFS_BANDWIDTH
 +static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
 +                                struct cftype *cft)
 +{
@@ -7419,7 +7464,9 @@ index 000000000000..c59691742340
 +{
 +      return 0;
 +}
++#endif
 +
++#ifdef CONFIG_RT_GROUP_SCHED
 +static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
 +                              struct cftype *cft, s64 val)
 +{
@@ -7443,7 +7490,9 @@ index 000000000000..c59691742340
 +{
 +      return 0;
 +}
++#endif
 +
++#ifdef CONFIG_UCLAMP_TASK_GROUP
 +static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
 +{
 +      return 0;
@@ -7467,8 +7516,22 @@ index 000000000000..c59691742340
 +{
 +      return nbytes;
 +}
++#endif
 +
 +static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
++      {
++              .name = "shares",
++              .read_u64 = cpu_shares_read_u64,
++              .write_u64 = cpu_shares_write_u64,
++      },
++      {
++              .name = "idle",
++              .read_s64 = cpu_idle_read_s64,
++              .write_s64 = cpu_idle_write_s64,
++      },
++#endif
++#ifdef CONFIG_CFS_BANDWIDTH
 +      {
 +              .name = "cfs_quota_us",
 +              .read_s64 = cpu_cfs_quota_read_s64,
@@ -7492,6 +7555,8 @@ index 000000000000..c59691742340
 +              .name = "stat.local",
 +              .seq_show = cpu_cfs_local_stat_show,
 +      },
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
 +      {
 +              .name = "rt_runtime_us",
 +              .read_s64 = cpu_rt_runtime_read,
@@ -7502,6 +7567,8 @@ index 000000000000..c59691742340
 +              .read_u64 = cpu_rt_period_read_uint,
 +              .write_u64 = cpu_rt_period_write_uint,
 +      },
++#endif
++#ifdef CONFIG_UCLAMP_TASK_GROUP
 +      {
 +              .name = "uclamp.min",
 +              .flags = CFTYPE_NOT_ON_ROOT,
@@ -7514,9 +7581,11 @@ index 000000000000..c59691742340
 +              .seq_show = cpu_uclamp_max_show,
 +              .write = cpu_uclamp_max_write,
 +      },
++#endif
 +      { }     /* Terminate */
 +};
 +
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
 +static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
 +                             struct cftype *cft)
 +{
@@ -7540,19 +7609,9 @@ index 000000000000..c59691742340
 +{
 +      return 0;
 +}
++#endif
 +
-+static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
-+                             struct cftype *cft)
-+{
-+      return 0;
-+}
-+
-+static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
-+                              struct cftype *cft, s64 idle)
-+{
-+      return 0;
-+}
-+
++#ifdef CONFIG_CFS_BANDWIDTH
 +static int cpu_max_show(struct seq_file *sf, void *v)
 +{
 +      return 0;
@@ -7563,8 +7622,10 @@ index 000000000000..c59691742340
 +{
 +      return nbytes;
 +}
++#endif
 +
 +static struct cftype cpu_files[] = {
++#ifdef CONFIG_GROUP_SCHED_WEIGHT
 +      {
 +              .name = "weight",
 +              .flags = CFTYPE_NOT_ON_ROOT,
@@ -7583,6 +7644,8 @@ index 000000000000..c59691742340
 +              .read_s64 = cpu_idle_read_s64,
 +              .write_s64 = cpu_idle_write_s64,
 +      },
++#endif
++#ifdef CONFIG_CFS_BANDWIDTH
 +      {
 +              .name = "max",
 +              .flags = CFTYPE_NOT_ON_ROOT,
@@ -7595,6 +7658,8 @@ index 000000000000..c59691742340
 +              .read_u64 = cpu_cfs_burst_read_u64,
 +              .write_u64 = cpu_cfs_burst_write_u64,
 +      },
++#endif
++#ifdef CONFIG_UCLAMP_TASK_GROUP
 +      {
 +              .name = "uclamp.min",
 +              .flags = CFTYPE_NOT_ON_ROOT,
@@ -7607,6 +7672,7 @@ index 000000000000..c59691742340
 +              .seq_show = cpu_uclamp_max_show,
 +              .write = cpu_uclamp_max_write,
 +      },
++#endif
 +      { }     /* terminate */
 +};
 +
@@ -8421,10 +8487,10 @@ index 000000000000..1dbd7eb6a434
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..09c9e9f80bf4
+index 000000000000..7fb3433c5c41
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,971 @@
+@@ -0,0 +1,997 @@
 +#ifndef _KERNEL_SCHED_ALT_SCHED_H
 +#define _KERNEL_SCHED_ALT_SCHED_H
 +
@@ -9120,15 +9186,41 @@ index 000000000000..09c9e9f80bf4
 +
 +static inline void nohz_run_idle_balance(int cpu) { }
 +
-+static inline
-+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
-+                                struct task_struct *p)
++static inline unsigned long
++uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
 +{
-+      return util;
++      if (clamp_id == UCLAMP_MIN)
++              return 0;
++
++      return SCHED_CAPACITY_SCALE;
 +}
 +
 +static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
 +
++static inline bool uclamp_is_used(void)
++{
++      return false;
++}
++
++static inline unsigned long
++uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
++{
++      if (clamp_id == UCLAMP_MIN)
++              return 0;
++
++      return SCHED_CAPACITY_SCALE;
++}
++
++static inline void
++uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value)
++{
++}
++
++static inline bool uclamp_rq_is_idle(struct rq *rq)
++{
++      return false;
++}
++
 +#ifdef CONFIG_SCHED_MM_CID
 +
 +#define SCHED_MM_CID_PERIOD_NS        (100ULL * 1000000)      /* 100ms */
@@ -11109,6 +11201,28 @@ index 6bcee4704059..cf88205fd4a2 100644
  
        return false;
  }
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index a50ed23bee77..be0477666049 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1665,6 +1665,9 @@ static void osnoise_sleep(bool skip_period)
+  */
+ static inline int osnoise_migration_pending(void)
+ {
++#ifdef CONFIG_SCHED_ALT
++      return 0;
++#else
+       if (!current->migration_pending)
+               return 0;
+ 
+@@ -1686,6 +1689,7 @@ static inline int osnoise_migration_pending(void)
+       mutex_unlock(&interface_lock);
+ 
+       return 1;
++#endif
+ }
+ 
+ /*
 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
 index 1469dd8075fa..803527a0e48a 100644
 --- a/kernel/trace/trace_selftest.c
