commit:     8cc75f539068bd1a5718dec6606f5c6826a7cf7f
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Sat Sep  2 09:58:44 2023 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Sat Sep  2 09:58:44 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8cc75f53

Linux patch 5.10.194

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README               |   4 +
 1193_linux-5.10.194.patch | 280 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 284 insertions(+)

diff --git a/0000_README b/0000_README
index 9cc075fd..9db6312d 100644
--- a/0000_README
+++ b/0000_README
@@ -815,6 +815,10 @@ Patch:  1192_linux-5.10.193.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.10.193
 
+Patch:  1193_linux-5.10.194.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.10.194
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1193_linux-5.10.194.patch b/1193_linux-5.10.194.patch
new file mode 100644
index 00000000..e21befbf
--- /dev/null
+++ b/1193_linux-5.10.194.patch
@@ -0,0 +1,280 @@
+diff --git a/Makefile b/Makefile
+index 0423b4b2b000f..9ec2fb0d08f01 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 193
++SUBLEVEL = 194
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
+index 1fc309b41f944..8d809724cde52 100644
+--- a/arch/arm/kernel/module-plts.c
++++ b/arch/arm/kernel/module-plts.c
+@@ -256,7 +256,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+               /* sort by type and symbol index */
+               sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+ 
+-              if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
++              if (!module_init_layout_section(secstrings + dstsec->sh_name))
+                       core_plts += count_plts(syms, dstsec->sh_addr, rels,
+                                               numrels, s->sh_info);
+               else
+diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
+index 2e224435c0249..29569284f4016 100644
+--- a/arch/arm64/kernel/module-plts.c
++++ b/arch/arm64/kernel/module-plts.c
+@@ -7,6 +7,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/moduleloader.h>
+ #include <linux/sort.h>
+ 
+ static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
+@@ -342,7 +343,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+               if (nents)
+                       sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);
+ 
+-              if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
++              if (!module_init_layout_section(secstrings + dstsec->sh_name))
+                       core_plts += count_plts(syms, rels, numrels,
+                                               sechdrs[i].sh_info, dstsec);
+               else
+diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
+index e9ee9ab90a0c6..4ca2c28878e0f 100644
+--- a/arch/mips/alchemy/common/dbdma.c
++++ b/arch/mips/alchemy/common/dbdma.c
+@@ -30,7 +30,6 @@
+  *
+  */
+ 
+-#include <linux/dma-map-ops.h> /* for dma_default_coherent */
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -624,18 +623,17 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+               dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+ 
+       /*
+-       * There is an erratum on certain Au1200/Au1550 revisions that could
+-       * result in "stale" data being DMA'ed. It has to do with the snoop
+-       * logic on the cache eviction buffer.  dma_default_coherent is set
+-       * to false on these parts.
++       * There is an errata on the Au1200/Au1550 parts that could result
++       * in "stale" data being DMA'ed. It has to do with the snoop logic on
++       * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
++       * these parts. If it is fixed in the future, these dma_cache_inv will
++       * just be nothing more than empty macros. See io.h.
+        */
+-      if (!dma_default_coherent)
+-              dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
++      dma_cache_wback_inv((unsigned long)buf, nbytes);
+       dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
+       wmb(); /* drain writebuffer */
+       dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+       ctp->chan_ptr->ddma_dbell = 0;
+-      wmb(); /* force doorbell write out to dma engine */
+ 
+       /* Get next descriptor pointer. */
+       ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+@@ -687,18 +685,17 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
+                         dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
+ #endif
+       /*
+-       * There is an erratum on certain Au1200/Au1550 revisions that could
+-       * result in "stale" data being DMA'ed. It has to do with the snoop
+-       * logic on the cache eviction buffer.  dma_default_coherent is set
+-       * to false on these parts.
++       * There is an errata on the Au1200/Au1550 parts that could result in
++       * "stale" data being DMA'ed. It has to do with the snoop logic on the
++       * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
++       * parts. If it is fixed in the future, these dma_cache_inv will just
++       * be nothing more than empty macros. See io.h.
+        */
+-      if (!dma_default_coherent)
+-              dma_cache_inv(KSEG0ADDR(buf), nbytes);
++      dma_cache_inv((unsigned long)buf, nbytes);
+       dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
+       wmb(); /* drain writebuffer */
+       dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
+       ctp->chan_ptr->ddma_dbell = 0;
+-      wmb(); /* force doorbell write out to dma engine */
+ 
+       /* Get next descriptor pointer. */
+       ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index e3df838c3c80e..f5bee76ea0618 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -273,7 +273,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       mhi_cntrl_config = info->config;
+       mhi_cntrl->cntrl_dev = &pdev->dev;
+       mhi_cntrl->iova_start = 0;
+-      mhi_cntrl->iova_stop = DMA_BIT_MASK(info->dma_data_width);
++      mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+       mhi_cntrl->fw_image = info->fw;
+       mhi_cntrl->edl_image = info->edl;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 3b4724d60868f..8445bb7ae06ab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2155,7 +2155,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+       amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
+ 
+       bo_va->ref_count = 1;
+-      bo_va->last_pt_update = dma_fence_get_stub();
+       INIT_LIST_HEAD(&bo_va->valids);
+       INIT_LIST_HEAD(&bo_va->invalids);
+ 
+@@ -2868,8 +2867,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+               vm->update_funcs = &amdgpu_vm_cpu_funcs;
+       else
+               vm->update_funcs = &amdgpu_vm_sdma_funcs;
+-
+-      vm->last_update = dma_fence_get_stub();
++      vm->last_update = NULL;
+       vm->last_unlocked = dma_fence_get_stub();
+ 
+       mutex_init(&vm->eviction_lock);
+@@ -3044,7 +3042,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+               vm->update_funcs = &amdgpu_vm_sdma_funcs;
+       }
+       dma_fence_put(vm->last_update);
+-      vm->last_update = dma_fence_get_stub();
++      vm->last_update = NULL;
+       vm->is_compute_context = true;
+ 
+       if (vm->pasid) {
+diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
+index 4fa67a8b22652..03476e205b7e3 100644
+--- a/include/linux/moduleloader.h
++++ b/include/linux/moduleloader.h
+@@ -39,6 +39,11 @@ bool module_init_section(const char *name);
+  */
+ bool module_exit_section(const char *name);
+ 
++/* Describes whether within_module_init() will consider this an init section
++ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
++ */
++bool module_init_layout_section(const char *sname);
++
+ /*
+  * Apply the given relocation to the (simplified) ELF.  Return -error
+  * or 0.
+diff --git a/kernel/module.c b/kernel/module.c
+index 33d1dc6d4cd6a..82af3ef79135a 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2280,7 +2280,7 @@ void *__symbol_get(const char *symbol)
+ }
+ EXPORT_SYMBOL_GPL(__symbol_get);
+ 
+-static bool module_init_layout_section(const char *sname)
++bool module_init_layout_section(const char *sname)
+ {
+ #ifndef CONFIG_MODULE_UNLOAD
+       if (module_exit_section(sname))
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 23101ebbbe1ef..c5624ab0580c5 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -874,7 +874,7 @@ reset_ipi:
+ static bool trc_inspect_reader(struct task_struct *t, void *arg)
+ {
+       int cpu = task_cpu(t);
+-      bool in_qs = false;
++      int nesting;
+       bool ofl = cpu_is_offline(cpu);
+ 
+       if (task_curr(t)) {
+@@ -894,18 +894,18 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
+               n_heavy_reader_updates++;
+               if (ofl)
+                       n_heavy_reader_ofl_updates++;
+-              in_qs = true;
++              nesting = 0;
+       } else {
+               // The task is not running, so C-language access is safe.
+-              in_qs = likely(!t->trc_reader_nesting);
++              nesting = t->trc_reader_nesting;
+       }
+ 
+-      // Mark as checked so that the grace-period kthread will
+-      // remove it from the holdout list.
+-      t->trc_reader_checked = true;
+-
+-      if (in_qs)
+-              return true;  // Already in quiescent state, done!!!
++      // If not exiting a read-side critical section, mark as checked
++      // so that the grace-period kthread will remove it from the
++      // holdout list.
++      t->trc_reader_checked = nesting >= 0;
++      if (nesting <= 0)
++              return !nesting;  // If in QS, done, otherwise try again later.
+ 
+       // The task is in a read-side critical section, so set up its
+       // state so that it will awaken the grace-period kthread upon exit
+@@ -958,9 +958,11 @@ static void trc_wait_for_one_reader(struct task_struct *t,
+               if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
+                       // Just in case there is some other reason for
+                       // failure than the target CPU being offline.
+                      WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
++                                __func__, cpu);
+                       rcu_tasks_trace.n_ipis_fails++;
+                       per_cpu(trc_ipi_to_cpu, cpu) = false;
+-                      t->trc_ipi_to_cpu = cpu;
++                      t->trc_ipi_to_cpu = -1;
+               }
+       }
+ }
+@@ -1081,14 +1083,28 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
+       }
+ }
+ 
++static void rcu_tasks_trace_empty_fn(void *unused)
++{
++}
++
+ /* Wait for grace period to complete and provide ordering. */
+ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
+ {
++      int cpu;
+       bool firstreport;
+       struct task_struct *g, *t;
+       LIST_HEAD(holdouts);
+       long ret;
+ 
++      // Wait for any lingering IPI handlers to complete.  Note that
++      // if a CPU has gone offline or transitioned to userspace in the
++      // meantime, all IPI handlers should have been drained beforehand.
++      // Yes, this assumes that CPUs process IPIs in order.  If that ever
++      // changes, there will need to be a recheck and/or timed wait.
++      for_each_online_cpu(cpu)
++              if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
+                      smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
++
+       // Remove the safety count.
+       smp_mb__before_atomic();  // Order vs. earlier atomics
+       atomic_dec(&trc_n_readers_need_end);
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 401c1f331cafa..07a284a18645a 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -507,7 +507,10 @@ static void synchronize_rcu_expedited_wait(void)
+                               if (rdp->rcu_forced_tick_exp)
+                                       continue;
+                               rdp->rcu_forced_tick_exp = true;
+-                              tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
++                              preempt_disable();
++                              if (cpu_online(cpu))
+                                      tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
++                              preempt_enable();
+                       }
+               }
+               j = READ_ONCE(jiffies_till_first_fqs);
