commit:     6634c77b73f4b19a229b8f59097874b23448bf12
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 20 05:26:19 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Sep 20 05:26:19 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6634c77b

Linux patch 6.6.107

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1106_linux-6.6.107.patch | 3614 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3618 insertions(+)

diff --git a/0000_README b/0000_README
index eae0f533..84cc5b9b 100644
--- a/0000_README
+++ b/0000_README
@@ -467,6 +467,10 @@ Patch:  1105_linux-6.6.106.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.6.106
 
+Patch:  1106_linux-6.6.107.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.6.107
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
 Desc:   Enable link security restrictions by default.

diff --git a/1106_linux-6.6.107.patch b/1106_linux-6.6.107.patch
new file mode 100644
index 00000000..4e1e6afe
--- /dev/null
+++ b/1106_linux-6.6.107.patch
@@ -0,0 +1,3614 @@
+diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+index 89c462653e2d33..8cc848ae11cb73 100644
+--- a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
++++ b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+@@ -41,7 +41,7 @@ properties:
+           - const: dma_intr2
+ 
+   clocks:
+-    minItems: 1
++    maxItems: 1
+ 
+   clock-names:
+     const: sw_baud
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index d7e1ada905b2d3..3bdd1558381057 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -740,7 +740,7 @@ The broadcast manager sends responses to user space in the same form:
+             struct timeval ival1, ival2;    /* count and subsequent interval */
+             canid_t can_id;                 /* unique can_id for task */
+             __u32 nframes;                  /* number of can_frames following */
+-            struct can_frame frames[0];
++            struct can_frame frames[];
+     };
+ 
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+diff --git a/Makefile b/Makefile
+index b934846659eed0..9c9e272f48b879 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 106
++SUBLEVEL = 107
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+ 
+diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
+index 2ac955b51148f4..6b79287baecc00 100644
+--- a/arch/riscv/include/asm/compat.h
++++ b/arch/riscv/include/asm/compat.h
+@@ -9,7 +9,6 @@
+  */
+ #include <linux/types.h>
+ #include <linux/sched.h>
+-#include <linux/sched/task_stack.h>
+ #include <asm-generic/compat.h>
+ 
+ static inline int is_compat_task(void)
+diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
+index 65a66df5bb865e..771e1cb17540db 100644
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -757,8 +757,6 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
+               break;
+ 
+       case PERF_TYPE_HARDWARE:
+-              if (is_sampling_event(event))   /* No sampling support */
+-                      return -ENOENT;
+               ev = attr->config;
+               if (!attr->exclude_user && attr->exclude_kernel) {
+                       /*
+@@ -856,6 +854,8 @@ static int cpumf_pmu_event_init(struct perf_event *event)
+       unsigned int type = event->attr.type;
+       int err;
+ 
++      if (is_sampling_event(event))   /* No sampling support */
++              return err;
+       if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+               err = __hw_perf_event_init(event, type);
+       else if (event->pmu->type == type)
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index c57d5df1abc603..0929d7fe7e2740 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -500,10 +500,18 @@ SECTIONS
+ PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
+ 
+ /*
+- * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
++ * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
++ * this.  Let's assume that nobody will be running a COMPILE_TEST kernel and
++ * let's assert that fuller build coverage is more valuable than being able to
++ * run a COMPILE_TEST kernel.
++ */
++#ifndef CONFIG_COMPILE_TEST
++/*
++ * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+  */
+ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+          "kernel image bigger than KERNEL_IMAGE_SIZE");
++#endif
+ 
+ #ifdef CONFIG_X86_64
+ /*
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 288db351677222..2c0bc6a93ec3e8 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -791,10 +791,15 @@ void kvm_set_cpu_caps(void)
+               F(PERFMON_V2)
+       );
+ 
++      kvm_cpu_cap_check_and_set(X86_FEATURE_VERW_CLEAR);
++
+       kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX,
+               F(TSA_SQ_NO) | F(TSA_L1_NO)
+       );
+ 
++      kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO);
++      kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO);
++
+       /*
+        * Synthesize "LFENCE is serializing" into the AMD-defined entry in
+        * KVM's supported CPUID if the feature is reported as supported by the
+diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
+index 4fb8508419dbd8..deadf135681b67 100644
+--- a/drivers/dma/dw/rzn1-dmamux.c
++++ b/drivers/dma/dw/rzn1-dmamux.c
+@@ -48,12 +48,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+       u32 mask;
+       int ret;
+ 
+-      if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
+-              return ERR_PTR(-EINVAL);
++      if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) {
++              ret = -EINVAL;
++              goto put_device;
++      }
+ 
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+-      if (!map)
+-              return ERR_PTR(-ENOMEM);
++      if (!map) {
++              ret = -ENOMEM;
++              goto put_device;
++      }
+ 
+       chan = dma_spec->args[0];
+       map->req_idx = dma_spec->args[4];
+@@ -94,12 +98,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+       if (ret)
+               goto clear_bitmap;
+ 
++      put_device(&pdev->dev);
+       return map;
+ 
+ clear_bitmap:
+       clear_bit(map->req_idx, dmamux->used_chans);
+ free_map:
+       kfree(map);
++put_device:
++      put_device(&pdev->dev);
+ 
+       return ERR_PTR(ret);
+ }
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 92e86ae9db29d7..4b999c5802f4b2 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -179,27 +179,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+       idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+       if (!idxd->wq_enable_map) {
+               rc = -ENOMEM;
+-              goto err_bitmap;
++              goto err_free_wqs;
+       }
+ 
+       for (i = 0; i < idxd->max_wqs; i++) {
+               wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+               if (!wq) {
+                       rc = -ENOMEM;
+-                      goto err;
++                      goto err_unwind;
+               }
+ 
+               idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+               conf_dev = wq_confdev(wq);
+               wq->id = i;
+               wq->idxd = idxd;
+-              device_initialize(wq_confdev(wq));
++              device_initialize(conf_dev);
+               conf_dev->parent = idxd_confdev(idxd);
+               conf_dev->bus = &dsa_bus_type;
+               conf_dev->type = &idxd_wq_device_type;
+               rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+-              if (rc < 0)
+-                      goto err;
++              if (rc < 0) {
++                      put_device(conf_dev);
++                      kfree(wq);
++                      goto err_unwind;
++              }
+ 
+               mutex_init(&wq->wq_lock);
+               init_waitqueue_head(&wq->err_queue);
+@@ -210,15 +213,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+               wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+               wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+               if (!wq->wqcfg) {
++                      put_device(conf_dev);
++                      kfree(wq);
+                       rc = -ENOMEM;
+-                      goto err;
++                      goto err_unwind;
+               }
+ 
+               if (idxd->hw.wq_cap.op_config) {
+                       wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+                       if (!wq->opcap_bmap) {
++                              kfree(wq->wqcfg);
++                              put_device(conf_dev);
++                              kfree(wq);
+                               rc = -ENOMEM;
+-                              goto err_opcap_bmap;
++                              goto err_unwind;
+                       }
+                       bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+               }
+@@ -229,13 +237,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 
+       return 0;
+ 
+-err_opcap_bmap:
+-      kfree(wq->wqcfg);
+-
+-err:
+-      put_device(conf_dev);
+-      kfree(wq);
+-
++err_unwind:
+       while (--i >= 0) {
+               wq = idxd->wqs[i];
+               if (idxd->hw.wq_cap.op_config)
+@@ -244,11 +246,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+               conf_dev = wq_confdev(wq);
+               put_device(conf_dev);
+               kfree(wq);
+-
+       }
+       bitmap_free(idxd->wq_enable_map);
+ 
+-err_bitmap:
++err_free_wqs:
+       kfree(idxd->wqs);
+ 
+       return rc;
+@@ -904,10 +905,12 @@ static void idxd_remove(struct pci_dev *pdev)
+       device_unregister(idxd_confdev(idxd));
+       idxd_shutdown(pdev);
+       idxd_device_remove_debugfs(idxd);
+-      idxd_cleanup(idxd);
++      perfmon_pmu_remove(idxd);
++      idxd_cleanup_interrupts(idxd);
++      if (device_pasid_enabled(idxd))
++              idxd_disable_system_pasid(idxd);
+       pci_iounmap(pdev, idxd->reg_base);
+       put_device(idxd_confdev(idxd));
+-      idxd_free(idxd);
+       pci_disable_device(pdev);
+ }
+ 
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index 4c3eb972039d60..d5882c8537e52b 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -1283,13 +1283,17 @@ static int bam_dma_probe(struct platform_device *pdev)
+       if (!bdev->bamclk) {
+               ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+                                          &bdev->num_channels);
+-              if (ret)
++              if (ret) {
+                       dev_err(bdev->dev, "num-channels unspecified in dt\n");
++                      return ret;
++              }
+ 
+               ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+                                          &bdev->num_ees);
+-              if (ret)
++              if (ret) {
+                       dev_err(bdev->dev, "num-ees unspecified in dt\n");
++                      return ret;
++              }
+       }
+ 
+       ret = clk_prepare_enable(bdev->bamclk);
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index c0fa5413246756..f7ddf588b7f9b7 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2063,8 +2063,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+        * priority. So Q0 is the highest priority queue and the last queue has
+        * the lowest priority.
+        */
+-      queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+-                                        GFP_KERNEL);
++      queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
+                                        sizeof(*queue_priority_map), GFP_KERNEL);
+       if (!queue_priority_map)
+               return -ENOMEM;
+ 
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index 54ec894150939a..233e58278943e0 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -127,7 +127,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
+ 
+       ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
+       if (!ptemp) {
+-              dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+               edac_printk(KERN_ERR, EDAC_MC,
+                           "Inject: Buffer Allocation error\n");
+               return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index f44b303ae287a7..eebac2e1a6c75b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -396,9 +396,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+       dma_fence_put(ring->vmid_wait);
+       ring->vmid_wait = NULL;
+       ring->me = 0;
+-
+-      if (!ring->is_mes_queue)
+-              ring->adev->rings[ring->idx] = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index a61ecefdafc512..710328f12194d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1765,15 +1765,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+                               struct amdgpu_job *job)
+ {
+       struct drm_gpu_scheduler **scheds;
+-
+-      /* The create msg must be in the first IB submitted */
+-      if (atomic_read(&job->base.entity->fence_seq))
+-              return -EINVAL;
++      struct dma_fence *fence;
+ 
+       /* if VCN0 is harvested, we can't support AV1 */
+       if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+               return -EINVAL;
+ 
++      /* wait for all jobs to finish before switching to instance 0 */
++      fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++      if (fence) {
++              dma_fence_wait(fence, false);
++              dma_fence_put(fence);
++      }
++
+       scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+               [AMDGPU_RING_PRIO_DEFAULT].sched;
+       drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+index 29164289c5f3e1..43249e9f66d74d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+@@ -1644,15 +1644,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
+                               struct amdgpu_job *job)
+ {
+       struct drm_gpu_scheduler **scheds;
+-
+-      /* The create msg must be in the first IB submitted */
+-      if (atomic_read(&job->base.entity->fence_seq))
+-              return -EINVAL;
++      struct dma_fence *fence;
+ 
+       /* if VCN0 is harvested, we can't support AV1 */
+       if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+               return -EINVAL;
+ 
++      /* wait for all jobs to finish before switching to instance 0 */
++      fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
++      if (fence) {
++              dma_fence_wait(fence, false);
++              dma_fence_put(fence);
++      }
++
+       scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
+               [AMDGPU_RING_PRIO_0].sched;
+       drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
+@@ -1743,22 +1747,16 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ 
+ #define RADEON_VCN_ENGINE_TYPE_ENCODE                 (0x00000002)
+ #define RADEON_VCN_ENGINE_TYPE_DECODE                 (0x00000003)
+-
+ #define RADEON_VCN_ENGINE_INFO                                (0x30000001)
+-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET             16
+-
+ #define RENCODE_ENCODE_STANDARD_AV1                   2
+ #define RENCODE_IB_PARAM_SESSION_INIT                 0x00000003
+-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET      64
+ 
+-/* return the offset in ib if id is found, -1 otherwise
+- * to speed up the searching we only search upto max_offset
+- */
+-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
++/* return the offset in ib if id is found, -1 otherwise */
++static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
+ {
+       int i;
+ 
+-      for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
++      for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
+               if (ib->ptr[i + 1] == id)
+                       return i;
+       }
+@@ -1773,33 +1771,29 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+       struct amdgpu_vcn_decode_buffer *decode_buffer;
+       uint64_t addr;
+       uint32_t val;
+-      int idx;
++      int idx = 0, sidx;
+ 
+       /* The first instance can decode anything */
+       if (!ring->me)
+               return 0;
+ 
+-      /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
+-      idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+-                      RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+-      if (idx < 0) /* engine info is missing */
+-              return 0;
+-
+-      val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+-      if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+-              decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+-
+-              if (!(decode_buffer->valid_buf_flag  & 0x1))
+-                      return 0;
+-
+-              addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+-                      decode_buffer->msg_buffer_address_lo;
+-              return vcn_v4_0_dec_msg(p, job, addr);
+-      } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+-              idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+-                      RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+-              if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+-                      return vcn_v4_0_limit_sched(p, job);
++      while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
++              val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
++              if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
++                      decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
++
++                      if (!(decode_buffer->valid_buf_flag & 0x1))
++                              return 0;
++
++                      addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
++                              decode_buffer->msg_buffer_address_lo;
++                      return vcn_v4_0_dec_msg(p, job, addr);
++              } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
++                      sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
++                      if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
++                              return vcn_v4_0_limit_sched(p, job);
++              }
++              idx += ib->ptr[idx] / 4;
+       }
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
+index 9e01054c243001..8beeda3439818e 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -1170,7 +1170,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
+       if (DISPLAY_VER(dev_priv) == 12)
+               abox_regs |= BIT(0);
+ 
+-      for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
++      for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
+               intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+ }
+ 
+@@ -1623,11 +1623,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+       if (table[config].page_mask == 0) {
+               drm_dbg(&dev_priv->drm,
+                       "Unknown memory configuration; disabling address buddy logic.\n");
+-              for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
++              for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
+                       intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+                                      BW_BUDDY_DISABLE);
+       } else {
+-              for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
++              for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
+                       intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+                                      table[config].page_mask);
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index bfa1070a5f08e2..f1f73c1e7b5cbf 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -365,11 +365,11 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ 
+               of_id = of_match_node(mtk_drm_of_ids, node);
+               if (!of_id)
+-                      goto next_put_node;
++                      continue;
+ 
+               pdev = of_find_device_by_node(node);
+               if (!pdev)
+-                      goto next_put_node;
++                      continue;
+ 
+               drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
+               if (!drm_dev)
+@@ -395,11 +395,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
+ next_put_device_pdev_dev:
+               put_device(&pdev->dev);
+ 
+-next_put_node:
+-              of_node_put(node);
+-
+-              if (cnt == MAX_CRTC)
++              if (cnt == MAX_CRTC) {
++                      of_node_put(node);
+                       break;
++              }
+       }
+ 
+       if (drm_priv->data->mmsys_dev_num == cnt) {
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 18c04f5e41d9c5..89fdc75cdcfa54 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1051,7 +1051,7 @@ static const struct pci_device_id i801_ids[] = {
+       { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS,           FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS,       FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS,       FEATURES_ICH5 | FEATURE_TCO_CNL) },
+-      { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,            FEATURES_ICH5 | FEATURE_TCO_CNL) },
++      { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,            FEATURES_ICH5)                   },
+       { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS,            FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS,          FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS,          FEATURES_ICH5 | FEATURE_TCO_CNL) },
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index ce7e977cc8a7a1..eb0e0b37eb41b6 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -2430,6 +2430,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
+               if (error)
+                       return error;
+ 
++              if (!iqs7222->kp_type[chan_index][i])
++                      continue;
++
+               if (!dev_desc->event_offset)
+                       continue;
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 8813db7eec3978..630cdd5a132831 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1155,6 +1155,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+               .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+                                       SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+       },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
++              },
++              .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++                                      SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
++              },
++              .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++                                      SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++      },
+       /*
+        * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+        * after suspend fixable with the forcenorestore quirk.
+diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
+index 2f9c8582f9401a..db40008f31cf1b 100644
+--- a/drivers/media/i2c/imx214.c
++++ b/drivers/media/i2c/imx214.c
+@@ -20,7 +20,9 @@
+ #include <media/v4l2-subdev.h>
+ 
+ #define IMX214_DEFAULT_CLK_FREQ       24000000
+-#define IMX214_DEFAULT_LINK_FREQ 480000000
++#define IMX214_DEFAULT_LINK_FREQ      600000000
++/* Keep wrong link frequency for backward compatibility */
++#define IMX214_DEFAULT_LINK_FREQ_LEGACY       480000000
+ #define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
+ #define IMX214_FPS 30
+ #define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+@@ -892,17 +894,26 @@ static int imx214_parse_fwnode(struct device *dev)
+               goto done;
+       }
+ 
+-      for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
++      if (bus_cfg.nr_of_link_frequencies != 1)
++              dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
++
++      for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
+               if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
+                       break;
+-
+-      if (i == bus_cfg.nr_of_link_frequencies) {
+-              dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
+-                      IMX214_DEFAULT_LINK_FREQ);
+-              ret = -EINVAL;
+-              goto done;
++              if (bus_cfg.link_frequencies[i] ==
++                  IMX214_DEFAULT_LINK_FREQ_LEGACY) {
++                      dev_warn(dev,
++                               "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
++                               IMX214_DEFAULT_LINK_FREQ);
++                      break;
++              }
+       }
+ 
++      if (i == bus_cfg.nr_of_link_frequencies)
++              ret = dev_err_probe(dev, -EINVAL,
++                                  "link-frequencies %d not supported, please review your DT\n",
++                                  IMX214_DEFAULT_LINK_FREQ);
++
+ done:
+       v4l2_fwnode_endpoint_free(&bus_cfg);
+       fwnode_handle_put(endpoint);
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index d4fd1302008ebd..c5aff27ec4a895 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
+               return ret;
+ 
+       /*
+-       * The write cycle timing is directly matching tWC, but is also
++       * Read setup timing depends on the operation done on the NAND:
++       *
++       * NRD_SETUP = max(tAR, tCLR)
++       */
++      timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
++      ncycles = DIV_ROUND_UP(timeps, mckperiodps);
++      totalcycles += ncycles;
++      ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
++      if (ret)
++              return ret;
++
++      /*
++       * The read cycle timing is directly matching tRC, but is also
+        * dependent on the setup and hold timings we calculated earlier,
+        * which gives:
+        *
+-       * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+-       *
+-       * NRD_SETUP is always 0.
++       * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
+        */
+       ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+       ncycles = max(totalcycles, ncycles);
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 88811139aaf5b9..c7956298397173 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -263,6 +263,7 @@ struct stm32_fmc2_nfc {
+       struct sg_table dma_data_sg;
+       struct sg_table dma_ecc_sg;
+       u8 *ecc_buf;
++      dma_addr_t dma_ecc_addr;
+       int dma_ecc_len;
+ 
+       struct completion complete;
+@@ -885,17 +886,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 
+       if (!write_data && !raw) {
+               /* Configure DMA ECC status */
+-              p = nfc->ecc_buf;
+               for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+-                      sg_set_buf(sg, p, nfc->dma_ecc_len);
+-                      p += nfc->dma_ecc_len;
+-              }
+-
+-              ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-                               eccsteps, dma_data_dir);
+-              if (!ret) {
+-                      ret = -EIO;
+-                      goto err_unmap_data;
++                      sg_dma_address(sg) = nfc->dma_ecc_addr +
++                                           s * nfc->dma_ecc_len;
++                      sg_dma_len(sg) = nfc->dma_ecc_len;
+               }
+ 
+               desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+@@ -904,7 +898,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+                                                  DMA_PREP_INTERRUPT);
+               if (!desc_ecc) {
+                       ret = -ENOMEM;
+-                      goto err_unmap_ecc;
++                      goto err_unmap_data;
+               }
+ 
+               reinit_completion(&nfc->dma_ecc_complete);
+@@ -912,7 +906,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+               desc_ecc->callback_param = &nfc->dma_ecc_complete;
+               ret = dma_submit_error(dmaengine_submit(desc_ecc));
+               if (ret)
+-                      goto err_unmap_ecc;
++                      goto err_unmap_data;
+ 
+               dma_async_issue_pending(nfc->dma_ecc_ch);
+       }
+@@ -932,7 +926,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+               if (!write_data && !raw)
+                       dmaengine_terminate_all(nfc->dma_ecc_ch);
+               ret = -ETIMEDOUT;
+-              goto err_unmap_ecc;
++              goto err_unmap_data;
+       }
+ 
+       /* Wait DMA data transfer completion */
+@@ -952,11 +946,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+               }
+       }
+ 
+-err_unmap_ecc:
+-      if (!write_data && !raw)
+-              dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-                           eccsteps, dma_data_dir);
+-
+ err_unmap_data:
+       dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+ 
+@@ -979,9 +968,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+ 
+       /* Write oob */
+       if (oob_required) {
+-              ret = nand_change_write_column_op(chip, mtd->writesize,
+-                                                chip->oob_poi, mtd->oobsize,
+-                                                false);
++              unsigned int offset_in_page = mtd->writesize;
++              const void *buf = chip->oob_poi;
++              unsigned int len = mtd->oobsize;
++
++              if (!raw) {
++                      struct mtd_oob_region oob_free;
++
++                      mtd_ooblayout_free(mtd, 0, &oob_free);
++                      offset_in_page += oob_free.offset;
++                      buf += oob_free.offset;
++                      len = oob_free.length;
++              }
++
++              ret = nand_change_write_column_op(chip, offset_in_page,
++                                                buf, len, false);
+               if (ret)
+                       return ret;
+       }
+@@ -1582,7 +1583,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
+               return ret;
+ 
+       /* Allocate a buffer to store ECC status registers */
+-      nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
++      nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
++                                         &nfc->dma_ecc_addr, GFP_KERNEL);
+       if (!nfc->ecc_buf)
+               return -ENOMEM;
+ 
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index abe58f10304336..57d1209134f11b 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -628,14 +628,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+               dlc |= XCAN_DLCR_EDL_MASK;
+       }
+ 
+-      if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+-          (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+-              can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+-      else
+-              can_put_echo_skb(skb, ndev, 0, 0);
+-
+-      priv->tx_head++;
+-
+       priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+       /* If the CAN frame is RTR frame this write triggers transmission
+        * (not on CAN FD)
+@@ -668,6 +660,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+                                       data[1]);
+               }
+       }
++
++      if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++          (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++              can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++      else
++              can_put_echo_skb(skb, ndev, 0, 0);
++
++      priv->tx_head++;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 2a8b5429df5957..8352d9b6469f2a 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2300,7 +2300,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+                */
+               phy_dev = of_phy_find_device(fep->phy_node);
+               phy_reset_after_clk_enable(phy_dev);
+-              put_device(&phy_dev->mdio.dev);
++              if (phy_dev)
++                      put_device(&phy_dev->mdio.dev);
+       }
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index b749aa3e783ffe..72869336e3a9a9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4210,7 +4210,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+               irq_num = pf->msix_entries[base + vector].vector;
+               irq_set_affinity_notifier(irq_num, NULL);
+               irq_update_affinity_hint(irq_num, NULL);
+-              free_irq(irq_num, &vsi->q_vectors[vector]);
++              free_irq(irq_num, vsi->q_vectors[vector]);
+       }
+       return err;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 92b2be06a6e930..f11cba65e5d85e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+       } else {
+               dev_info(&adapter->pdev->dev, "online testing starting\n");
+ 
+-              /* PHY is powered down when interface is down */
+-              if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++              if (igb_link_test(adapter, &data[TEST_LINK]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+-              else
+-                      data[TEST_LINK] = 0;
+ 
+               /* Online tests aren't run; pass by default */
+               data[TEST_REG] = 0;
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index e02706b7cc1ed6..f1fac89721ed93 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -99,6 +99,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
+       if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
+               return -EINVAL;
+ 
++      gpiod_put(mdiodev->reset_gpio);
+       reset_control_put(mdiodev->reset_ctrl);
+ 
+       mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
+@@ -775,9 +776,6 @@ void mdiobus_unregister(struct mii_bus *bus)
+               if (!mdiodev)
+                       continue;
+ 
+-              if (mdiodev->reset_gpio)
+-                      gpiod_put(mdiodev->reset_gpio);
+-
+               mdiodev->device_remove(mdiodev);
+               mdiodev->device_free(mdiodev);
+       }
+diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
+index ebc8a7e21a3181..3409924498e9cf 100644
+--- a/drivers/phy/tegra/xusb-tegra210.c
++++ b/drivers/phy/tegra/xusb-tegra210.c
+@@ -3164,18 +3164,22 @@ tegra210_xusb_padctl_probe(struct device *dev,
+       }
+ 
+       pdev = of_find_device_by_node(np);
++      of_node_put(np);
+       if (!pdev) {
+               dev_warn(dev, "PMC device is not available\n");
+               goto out;
+       }
+ 
+-      if (!platform_get_drvdata(pdev))
++      if (!platform_get_drvdata(pdev)) {
++              put_device(&pdev->dev);
+               return ERR_PTR(-EPROBE_DEFER);
++      }
+ 
+       padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
+       if (!padctl->regmap)
+               dev_info(dev, "failed to find PMC regmap\n");
+ 
++      put_device(&pdev->dev);
+ out:
+       return &padctl->base;
+ }
+diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
+index 507e1552db5e83..3127f3702c3ae3 100644
+--- a/drivers/phy/ti/phy-ti-pipe3.c
++++ b/drivers/phy/ti/phy-ti-pipe3.c
+@@ -666,12 +666,20 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
+       return 0;
+ }
+ 
++static void ti_pipe3_put_device(void *_dev)
++{
++      struct device *dev = _dev;
++
++      put_device(dev);
++}
++
+ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
+ {
+       struct device *dev = phy->dev;
+       struct device_node *node = dev->of_node;
+       struct device_node *control_node;
+       struct platform_device *control_pdev;
++      int ret;
+ 
+       phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
+                                                       "syscon-phy-power");
+@@ -703,6 +711,11 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
+               }
+ 
+               phy->control_dev = &control_pdev->dev;
++
++              ret = devm_add_action_or_reset(dev, ti_pipe3_put_device,
++                                             phy->control_dev);
++              if (ret)
++                      return ret;
+       }
+ 
+       if (phy->mode == PIPE3_MODE_PCIE) {
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index d1e7ba1fb3e1af..27e3d939b7bb9e 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+       if (!regmap)
+               return -EPROBE_DEFER;
+ 
+-      gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++      device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++      gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+       if (IS_ERR(gdp)) {
+-              dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++              dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+               return PTR_ERR(gdp);
+       }
+ 
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+       }
+ 
+       config.dev = &pdev->dev;
+-      config.dev->of_node = pdev->dev.parent->of_node;
+       config.regmap = regmap;
+ 
+       rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index 959fae54ca394b..4c252bbbf8e276 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -543,10 +543,10 @@ static ssize_t hvc_write(struct tty_struct *tty, const u8 *buf, size_t count)
+       }
+ 
+       /*
+-       * Racy, but harmless, kick thread if there is still pending data.
++       * Kick thread to flush if there's still pending data
++       * or to wakeup the write queue.
+        */
+-      if (hp->n_outbuf)
+-              hvc_kick();
++      hvc_kick();
+ 
+       return written;
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 8a2ce2ca6b394a..66a88bba8f15b8 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1163,17 +1163,6 @@ static int sc16is7xx_startup(struct uart_port *port)
+       sc16is7xx_port_write(port, SC16IS7XX_FCR_REG,
+                            SC16IS7XX_FCR_FIFO_BIT);
+ 
+-      /* Enable EFR */
+-      sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+-                           SC16IS7XX_LCR_CONF_MODE_B);
+-
+-      regcache_cache_bypass(one->regmap, true);
+-
+-      /* Enable write access to enhanced features and internal clock div */
+-      sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+-                            SC16IS7XX_EFR_ENABLE_BIT,
+-                            SC16IS7XX_EFR_ENABLE_BIT);
+-
+       /* Enable TCR/TLR */
+       sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+                             SC16IS7XX_MCR_TCRTLR_BIT,
+@@ -1185,7 +1174,8 @@ static int sc16is7xx_startup(struct uart_port *port)
+                            SC16IS7XX_TCR_RX_RESUME(24) |
+                            SC16IS7XX_TCR_RX_HALT(48));
+ 
+-      regcache_cache_bypass(one->regmap, false);
++      /* Disable TCR/TLR access */
++      sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_TCRTLR_BIT, 0);
+ 
+       /* Now, initialize the UART */
+       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
+index 90536f47906c33..d7ed50ff380cf4 100644
+--- a/drivers/usb/gadget/function/f_midi2.c
++++ b/drivers/usb/gadget/function/f_midi2.c
+@@ -1601,6 +1601,7 @@ static int f_midi2_create_card(struct f_midi2 *midi2)
+                       strscpy(fb->info.name, ump_fb_name(b),
+                               sizeof(fb->info.name));
+               }
++              snd_ump_update_group_attrs(ump);
+       }
+ 
+       for (i = 0; i < midi2->num_eps; i++) {
+@@ -1738,9 +1739,12 @@ static int f_midi2_create_usb_configs(struct f_midi2 *midi2,
+       case USB_SPEED_HIGH:
+               midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(512);
+               midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(512);
+-              for (i = 0; i < midi2->num_eps; i++)
++              for (i = 0; i < midi2->num_eps; i++) {
+                       midi2_midi2_ep_out_desc[i].wMaxPacketSize =
+                               cpu_to_le16(512);
++                      midi2_midi2_ep_in_desc[i].wMaxPacketSize =
++                              cpu_to_le16(512);
++              }
+               fallthrough;
+       case USB_SPEED_FULL:
+               midi1_in_eps = midi2_midi1_ep_in_descs;
+@@ -1749,9 +1753,12 @@ static int f_midi2_create_usb_configs(struct f_midi2 *midi2,
+       case USB_SPEED_SUPER:
+               midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+               midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+-              for (i = 0; i < midi2->num_eps; i++)
++              for (i = 0; i < midi2->num_eps; i++) {
+                       midi2_midi2_ep_out_desc[i].wMaxPacketSize =
+                               cpu_to_le16(1024);
++                      midi2_midi2_ep_in_desc[i].wMaxPacketSize =
++                              cpu_to_le16(1024);
++              }
+               midi1_in_eps = midi2_midi1_ep_in_ss_descs;
+               midi1_out_eps = midi2_midi1_ep_out_ss_descs;
+               break;
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index d5d89fadde433f..a06f56c08e19d0 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -764,8 +764,7 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+       if (!dum->driver)
+               return -ESHUTDOWN;
+ 
+-      local_irq_save(flags);
+-      spin_lock(&dum->lock);
++      spin_lock_irqsave(&dum->lock, flags);
+       list_for_each_entry(iter, &ep->queue, queue) {
+               if (&iter->req != _req)
+                       continue;
+@@ -775,15 +774,16 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+               retval = 0;
+               break;
+       }
+-      spin_unlock(&dum->lock);
+ 
+       if (retval == 0) {
+               dev_dbg(udc_dev(dum),
+                               "dequeued req %p from %s, len %d buf %p\n",
+                               req, _ep->name, _req->length, _req->buf);
++              spin_unlock(&dum->lock);
+               usb_gadget_giveback_request(_ep, _req);
++              spin_lock(&dum->lock);
+       }
+-      local_irq_restore(flags);
++      spin_unlock_irqrestore(&dum->lock, flags);
+       return retval;
+ }
+ 
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 04718048b74bd9..621f12c11cbc2b 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -945,7 +945,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
+ out:
+       /* we are now at a leaf device */
+       xhci_debugfs_remove_slot(xhci, slot_id);
+-      xhci_free_virt_device(xhci, vdev, slot_id);
++      xhci_free_virt_device(xhci, xhci->devs[slot_id], slot_id);
+ }
+ 
+ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index ac72b04c997bfb..ef546f660b9927 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1322,7 +1322,18 @@ static const struct usb_device_id option_ids[] = {
+        .driver_info = NCTRL(0) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff),    /* Telit LE910C1-EUX (ECM) */
+        .driver_info = NCTRL(0) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1034, 0xff),    /* Telit LE910C4-WWX (rmnet) */
++       .driver_info = RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1036, 0xff) },  /* Telit LE910C4-WWX */
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1037, 0xff),    /* Telit LE910C4-WWX (rmnet) */
++       .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1038, 0xff),    /* Telit LE910C4-WWX (rmnet) */
++       .driver_info = NCTRL(0) | RSVD(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103b, 0xff),    /* Telit LE910C4-WWX */
++       .driver_info = NCTRL(0) | NCTRL(1) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103c, 0xff),    /* Telit LE910C4-WWX */
++       .driver_info = NCTRL(0) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+         .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+@@ -1369,6 +1380,12 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),    /* Telit FN990A (PCIe) */
+         .driver_info = RSVD(0) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1077, 0xff),    /* Telit FN990A (rmnet + audio) */
++        .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1078, 0xff),    /* Telit FN990A (MBIM + audio) */
++        .driver_info = NCTRL(0) | RSVD(1) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1079, 0xff),    /* Telit FN990A (RNDIS + audio) */
++        .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),    /* Telit FE990A (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),    /* Telit FE990A (MBIM) */
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 48b06459bc485a..ccf94c5fbfdfd6 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -103,6 +103,25 @@ struct btrfs_bio_ctrl {
+       blk_opf_t opf;
+       btrfs_bio_end_io_t end_io_func;
+       struct writeback_control *wbc;
++      struct readahead_control *ractl;
++
++      /*
++       * The start offset of the last used extent map by a read operation.
++       *
++       * This is for proper compressed read merge.
++       * U64_MAX means we are starting the read and have made no progress yet.
++       *
++       * The current btrfs_bio_is_contig() only uses disk_bytenr as
++       * the condition to check if the read can be merged with previous
++       * bio, which is not correct. E.g. two file extents pointing to the
++       * same extent but with different offset.
++       *
++       * So here we need to do extra checks to only merge reads that are
++       * covered by the same extent map.
++       * Just extent_map::start will be enough, as they are unique
++       * inside the same inode.
++       */
++      u64 last_em_start;
+ };
+ 
+ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
+@@ -952,6 +971,23 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
+       }
+       return em;
+ }
++
++static void btrfs_readahead_expand(struct readahead_control *ractl,
++                                 const struct extent_map *em)
++{
++      const u64 ra_pos = readahead_pos(ractl);
++      const u64 ra_end = ra_pos + readahead_length(ractl);
++      const u64 em_end = em->start + em->ram_bytes;
++
++      /* No expansion for holes and inline extents. */
++      if (em->block_start > EXTENT_MAP_LAST_BYTE)
++              return;
++
++      ASSERT(em_end >= ra_pos);
++      if (em_end > ra_end)
++              readahead_expand(ractl, ra_pos, em_end - ra_pos);
++}
++
+ /*
+  * basic readpage implementation.  Locked extent state structs are inserted
+  * into the tree that are removed when the IO is done (by the end_io
+@@ -960,7 +996,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
+  * return 0 on success, otherwise return error
+  */
+ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+-                    struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
++                    struct btrfs_bio_ctrl *bio_ctrl)
+ {
+       struct inode *inode = page->mapping->host;
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+@@ -1023,6 +1059,17 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+ 
+               iosize = min(extent_map_end(em) - cur, end - cur + 1);
+               iosize = ALIGN(iosize, blocksize);
++
++              /*
++               * Only expand readahead for extents which are already creating
++               * the pages anyway in add_ra_bio_pages, which is compressed
++               * extents in the non subpage case.
++               */
++              if (bio_ctrl->ractl &&
++                  !btrfs_is_subpage(fs_info, page) &&
++                  compress_type != BTRFS_COMPRESS_NONE)
++                      btrfs_readahead_expand(bio_ctrl->ractl, em);
++
+               if (compress_type != BTRFS_COMPRESS_NONE)
+                       disk_bytenr = em->block_start;
+               else
+@@ -1066,12 +1113,11 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+                * non-optimal behavior (submitting 2 bios for the same extent).
+                */
+               if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+-                  prev_em_start && *prev_em_start != (u64)-1 &&
+-                  *prev_em_start != em->start)
++                  bio_ctrl->last_em_start != (u64)-1 &&
++                  bio_ctrl->last_em_start != em->start)
+                       force_bio_submit = true;
+ 
+-              if (prev_em_start)
+-                      *prev_em_start = em->start;
++              bio_ctrl->last_em_start = em->start;
+ 
+               free_extent_map(em);
+               em = NULL;
+@@ -1117,12 +1163,15 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
+       struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
+       u64 start = page_offset(page);
+       u64 end = start + PAGE_SIZE - 1;
+-      struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
++      struct btrfs_bio_ctrl bio_ctrl = {
++              .opf = REQ_OP_READ,
++              .last_em_start = (u64)-1,
++      };
+       int ret;
+ 
+       btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
+ 
+-      ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
++      ret = btrfs_do_readpage(page, NULL, &bio_ctrl);
+       /*
+        * If btrfs_do_readpage() failed we will want to submit the assembled
+        * bio to do the cleanup.
+@@ -1134,8 +1183,7 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
+ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
+                                       u64 start, u64 end,
+                                       struct extent_map **em_cached,
+-                                      struct btrfs_bio_ctrl *bio_ctrl,
+-                                      u64 *prev_em_start)
++                                      struct btrfs_bio_ctrl *bio_ctrl)
+ {
+       struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
+       int index;
+@@ -1143,8 +1191,7 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
+       btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
+ 
+       for (index = 0; index < nr_pages; index++) {
+-              btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
+-                                prev_em_start);
++              btrfs_do_readpage(pages[index], em_cached, bio_ctrl);
+               put_page(pages[index]);
+       }
+ }
+@@ -2224,10 +2271,13 @@ int extent_writepages(struct address_space *mapping,
+ 
+ void extent_readahead(struct readahead_control *rac)
+ {
+-      struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
++      struct btrfs_bio_ctrl bio_ctrl = {
++              .opf = REQ_OP_READ | REQ_RAHEAD,
++              .ractl = rac,
++              .last_em_start = (u64)-1,
++      };
+       struct page *pagepool[16];
+       struct extent_map *em_cached = NULL;
+-      u64 prev_em_start = (u64)-1;
+       int nr;
+ 
+       while ((nr = readahead_page_batch(rac, pagepool))) {
+@@ -2235,7 +2285,7 @@ void extent_readahead(struct readahead_control *rac)
+               u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
+ 
+               contiguous_readpages(pagepool, nr, contig_start, contig_end,
+-                              &em_cached, &bio_ctrl, &prev_em_start);
++                              &em_cached, &bio_ctrl);
+       }
+ 
+       if (em_cached)
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 3e4c3fcb588ba8..952c99fcb636dc 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3106,7 +3106,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+               .nodeid_out = ff_out->nodeid,
+               .fh_out = ff_out->fh,
+               .off_out = pos_out,
+-              .len = len,
++              .len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
+               .flags = flags
+       };
+       struct fuse_write_out outarg;
+@@ -3172,6 +3172,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+               fc->no_copy_file_range = 1;
+               err = -EOPNOTSUPP;
+       }
++      if (!err && outarg.size > len)
++              err = -EIO;
++
+       if (err)
+               goto out;
+ 
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 6b90fea6cca209..257ba5398387be 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -70,6 +70,24 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+                                        !list_empty(&of->list));
+ }
+ 
++/* Get active reference to kernfs node for an open file */
++static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
++{
++      /* Skip if file was already released */
++      if (unlikely(of->released))
++              return NULL;
++
++      if (!kernfs_get_active(of->kn))
++              return NULL;
++
++      return of;
++}
++
++static void kernfs_put_active_of(struct kernfs_open_file *of)
++{
++      return kernfs_put_active(of->kn);
++}
++
+ /**
+  * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
+  *
+@@ -139,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
+ 
+       if (ops->seq_stop)
+               ops->seq_stop(sf, v);
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+ }
+ 
+ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+@@ -152,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+        * the ops aren't called concurrently for the same open file.
+        */
+       mutex_lock(&of->mutex);
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return ERR_PTR(-ENODEV);
+ 
+       ops = kernfs_ops(of->kn);
+@@ -238,7 +256,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+        * the ops aren't called concurrently for the same open file.
+        */
+       mutex_lock(&of->mutex);
+-      if (!kernfs_get_active(of->kn)) {
++      if (!kernfs_get_active_of(of)) {
+               len = -ENODEV;
+               mutex_unlock(&of->mutex);
+               goto out_free;
+@@ -252,7 +270,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+       else
+               len = -EINVAL;
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       mutex_unlock(&of->mutex);
+ 
+       if (len < 0)
+@@ -323,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+        * the ops aren't called concurrently for the same open file.
+        */
+       mutex_lock(&of->mutex);
+-      if (!kernfs_get_active(of->kn)) {
++      if (!kernfs_get_active_of(of)) {
+               mutex_unlock(&of->mutex);
+               len = -ENODEV;
+               goto out_free;
+@@ -335,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+       else
+               len = -EINVAL;
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       mutex_unlock(&of->mutex);
+ 
+       if (len > 0)
+@@ -357,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
+       if (!of->vm_ops)
+               return;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return;
+ 
+       if (of->vm_ops->open)
+               of->vm_ops->open(vma);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+ }
+ 
+ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
+@@ -375,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
+       if (!of->vm_ops)
+               return VM_FAULT_SIGBUS;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return VM_FAULT_SIGBUS;
+ 
+       ret = VM_FAULT_SIGBUS;
+       if (of->vm_ops->fault)
+               ret = of->vm_ops->fault(vmf);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+@@ -395,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
+       if (!of->vm_ops)
+               return VM_FAULT_SIGBUS;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return VM_FAULT_SIGBUS;
+ 
+       ret = 0;
+@@ -404,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault 
*vmf)
+       else
+               file_update_time(file);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+@@ -418,14 +436,14 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
+       if (!of->vm_ops)
+               return -EINVAL;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return -EINVAL;
+ 
+       ret = -EINVAL;
+       if (of->vm_ops->access)
+               ret = of->vm_ops->access(vma, addr, buf, len, write);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+@@ -504,7 +522,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+       mutex_lock(&of->mutex);
+ 
+       rc = -ENODEV;
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               goto out_unlock;
+ 
+       ops = kernfs_ops(of->kn);
+@@ -539,7 +557,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+       }
+       vma->vm_ops = &kernfs_vm_ops;
+ out_put:
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+ out_unlock:
+       mutex_unlock(&of->mutex);
+ 
+@@ -894,7 +912,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
+       struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
+       __poll_t ret;
+ 
+-      if (!kernfs_get_active(kn))
++      if (!kernfs_get_active_of(of))
+               return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+ 
+       if (kn->attr.ops->poll)
+@@ -902,7 +920,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
+       else
+               ret = kernfs_generic_poll(of, wait);
+ 
+-      kernfs_put_active(kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index cc764da581c43c..1bcdaee7e856f0 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -873,6 +873,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
+ 
+       if (fsinfo->xattr_support)
+               server->caps |= NFS_CAP_XATTR;
++      else
++              server->caps &= ~NFS_CAP_XATTR;
+ #endif
+ }
+ 
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index a1ff4a4f5380eb..4e53708dfcf434 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -469,8 +469,16 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+       if (user_backed_iter(iter))
+               dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+ 
+-      if (!swap)
+-              nfs_start_io_direct(inode);
++      if (!swap) {
++              result = nfs_start_io_direct(inode);
++              if (result) {
++                      /* release the reference that would usually be
++                       * consumed by nfs_direct_read_schedule_iovec()
++                       */
++                      nfs_direct_req_release(dreq);
++                      goto out_release;
++              }
++      }
+ 
+       NFS_I(inode)->read_io += count;
+       requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
+@@ -1023,7 +1031,14 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+               requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+                                                           FLUSH_STABLE);
+       } else {
+-              nfs_start_io_direct(inode);
++              result = nfs_start_io_direct(inode);
++              if (result) {
++                      /* release the reference that would usually be
++                       * consumed by nfs_direct_write_schedule_iovec()
++                       */
++                      nfs_direct_req_release(dreq);
++                      goto out_release;
++              }
+ 
+               requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+                                                           FLUSH_COND_STABLE);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 003dda0018403d..2f4db026f8d678 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -167,7 +167,10 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
+               iocb->ki_filp,
+               iov_iter_count(to), (unsigned long) iocb->ki_pos);
+ 
+-      nfs_start_io_read(inode);
++      result = nfs_start_io_read(inode);
++      if (result)
++              return result;
++
+       result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+       if (!result) {
+               result = generic_file_read_iter(iocb, to);
+@@ -188,7 +191,10 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
+ 
+       dprintk("NFS: splice_read(%pD2, %zu@%llu)\n", in, len, *ppos);
+ 
+-      nfs_start_io_read(inode);
++      result = nfs_start_io_read(inode);
++      if (result)
++              return result;
++
+       result = nfs_revalidate_mapping(inode, in->f_mapping);
+       if (!result) {
+               result = filemap_splice_read(in, ppos, pipe, len, flags);
+@@ -668,7 +674,9 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+       nfs_clear_invalid_mapping(file->f_mapping);
+ 
+       since = filemap_sample_wb_err(file->f_mapping);
+-      nfs_start_io_write(inode);
++      error = nfs_start_io_write(inode);
++      if (error)
++              return error;
+       result = generic_write_checks(iocb, from);
+       if (result > 0)
+               result = generic_perform_write(iocb, from);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 7354b6b1047833..42c73c647a27fe 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -276,7 +276,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
+               struct pnfs_layout_segment *l2)
+ {
+       const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
+-      const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
++      const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
+       u32 i;
+ 
+       if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
+@@ -756,8 +756,11 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+                       continue;
+ 
+               if (check_device &&
+-                  nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
++                  nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
++                      // reinitialize the error state in case if this is the last iteration
++                      ds = ERR_PTR(-EINVAL);
+                       continue;
++              }
+ 
+               *best_idx = idx;
+               break;
+@@ -787,7 +790,7 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
+       struct nfs4_pnfs_ds *ds;
+ 
+       ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
+-      if (ds)
++      if (!IS_ERR(ds))
+               return ds;
+       return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
+ }
+@@ -801,7 +804,7 @@ ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
+ 
+       ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
+                                              best_idx);
+-      if (ds || !pgio->pg_mirror_idx)
++      if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
+               return ds;
+       return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+ }
+@@ -859,7 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+       req->wb_nio = 0;
+ 
+       ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
+-      if (!ds) {
++      if (IS_ERR(ds)) {
+               if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+                       goto out_mds;
+               pnfs_generic_pg_cleanup(pgio);
+@@ -1063,11 +1066,13 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
+ {
+       u32 idx = hdr->pgio_mirror_idx + 1;
+       u32 new_idx = 0;
++      struct nfs4_pnfs_ds *ds;
+ 
+-      if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
+-              ff_layout_send_layouterror(hdr->lseg);
+-      else
++      ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
++      if (IS_ERR(ds))
+               pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
++      else
++              ff_layout_send_layouterror(hdr->lseg);
+       pnfs_read_resend_pnfs(hdr, new_idx);
+ }
+ 
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 7e7dd2aab449dd..5cd5e4226db364 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -645,8 +645,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+       trace_nfs_setattr_enter(inode);
+ 
+       /* Write all dirty data */
+-      if (S_ISREG(inode->i_mode))
++      if (S_ISREG(inode->i_mode)) {
++              nfs_file_block_o_direct(NFS_I(inode));
+               nfs_sync_inode(inode);
++      }
+ 
+       fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
+       if (fattr == NULL) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 4eea91d054b241..bde81e0abf0ae1 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -6,6 +6,7 @@
+ #include "nfs4_fs.h"
+ #include <linux/fs_context.h>
+ #include <linux/security.h>
++#include <linux/compiler_attributes.h>
+ #include <linux/crc32.h>
+ #include <linux/sunrpc/addr.h>
+ #include <linux/nfs_page.h>
+@@ -461,11 +462,11 @@ extern const struct netfs_request_ops nfs_netfs_ops;
+ #endif
+ 
+ /* io.c */
+-extern void nfs_start_io_read(struct inode *inode);
++extern __must_check int nfs_start_io_read(struct inode *inode);
+ extern void nfs_end_io_read(struct inode *inode);
+-extern void nfs_start_io_write(struct inode *inode);
++extern  __must_check int nfs_start_io_write(struct inode *inode);
+ extern void nfs_end_io_write(struct inode *inode);
+-extern void nfs_start_io_direct(struct inode *inode);
++extern __must_check int nfs_start_io_direct(struct inode *inode);
+ extern void nfs_end_io_direct(struct inode *inode);
+ 
+ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
+@@ -473,6 +474,16 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
+       return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
+ }
+ 
++/* Must be called with exclusively locked inode->i_rwsem */
++static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi)
++{
++      if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
++              clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
++              inode_dio_wait(&nfsi->vfs_inode);
++      }
++}
++
++
+ /* namespace.c */
+ #define NFS_PATH_CANONICAL 1
+ extern char *nfs_path(char **p, struct dentry *dentry,
+diff --git a/fs/nfs/io.c b/fs/nfs/io.c
+index b5551ed8f648bc..d275b0a250bf3b 100644
+--- a/fs/nfs/io.c
++++ b/fs/nfs/io.c
+@@ -14,15 +14,6 @@
+ 
+ #include "internal.h"
+ 
+-/* Call with exclusively locked inode->i_rwsem */
+-static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
+-{
+-      if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+-              clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+-              inode_dio_wait(inode);
+-      }
+-}
+-
+ /**
+  * nfs_start_io_read - declare the file is being used for buffered reads
+  * @inode: file inode
+@@ -39,19 +30,28 @@ static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
+  * Note that buffered writes and truncates both take a write lock on
+  * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+  */
+-void
++int
+ nfs_start_io_read(struct inode *inode)
+ {
+       struct nfs_inode *nfsi = NFS_I(inode);
++      int err;
++
+       /* Be an optimist! */
+-      down_read(&inode->i_rwsem);
++      err = down_read_killable(&inode->i_rwsem);
++      if (err)
++              return err;
+       if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
+-              return;
++              return 0;
+       up_read(&inode->i_rwsem);
++
+       /* Slow path.... */
+-      down_write(&inode->i_rwsem);
+-      nfs_block_o_direct(nfsi, inode);
++      err = down_write_killable(&inode->i_rwsem);
++      if (err)
++              return err;
++      nfs_file_block_o_direct(nfsi);
+       downgrade_write(&inode->i_rwsem);
++
++      return 0;
+ }
+ 
+ /**
+@@ -74,11 +74,15 @@ nfs_end_io_read(struct inode *inode)
+  * Declare that a buffered read operation is about to start, and ensure
+  * that we block all direct I/O.
+  */
+-void
++int
+ nfs_start_io_write(struct inode *inode)
+ {
+-      down_write(&inode->i_rwsem);
+-      nfs_block_o_direct(NFS_I(inode), inode);
++      int err;
++
++      err = down_write_killable(&inode->i_rwsem);
++      if (!err)
++              nfs_file_block_o_direct(NFS_I(inode));
++      return err;
+ }
+ 
+ /**
+@@ -119,19 +123,28 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
+  * Note that buffered writes and truncates both take a write lock on
+  * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+  */
+-void
++int
+ nfs_start_io_direct(struct inode *inode)
+ {
+       struct nfs_inode *nfsi = NFS_I(inode);
++      int err;
++
+       /* Be an optimist! */
+-      down_read(&inode->i_rwsem);
++      err = down_read_killable(&inode->i_rwsem);
++      if (err)
++              return err;
+       if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
+-              return;
++              return 0;
+       up_read(&inode->i_rwsem);
++
+       /* Slow path.... */
+-      down_write(&inode->i_rwsem);
++      err = down_write_killable(&inode->i_rwsem);
++      if (err)
++              return err;
+       nfs_block_buffered(nfsi, inode);
+       downgrade_write(&inode->i_rwsem);
++
++      return 0;
+ }
+ 
+ /**
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 9f0d69e6526443..582cf8a469560b 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -112,6 +112,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+       exception.inode = inode;
+       exception.state = lock->open_context->state;
+ 
++      nfs_file_block_o_direct(NFS_I(inode));
+       err = nfs_sync_inode(inode);
+       if (err)
+               goto out;
+@@ -355,6 +356,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
+               return status;
+       }
+ 
++      nfs_file_block_o_direct(NFS_I(dst_inode));
+       status = nfs_sync_inode(dst_inode);
+       if (status)
+               return status;
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 02788c3c85e5bb..befdb0f4e6dc3c 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -282,9 +282,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
+ 
+       /* flush all pending writes on both src and dst so that server
+        * has the latest data */
++      nfs_file_block_o_direct(NFS_I(src_inode));
+       ret = nfs_sync_inode(src_inode);
+       if (ret)
+               goto out_unlock;
++      nfs_file_block_o_direct(NFS_I(dst_inode));
+       ret = nfs_sync_inode(dst_inode);
+       if (ret)
+               goto out_unlock;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6debcfc63222d2..124b9cee6fed7d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3882,8 +3882,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
+                       res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
+               }
+               memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+-              server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
+-                                NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
++              server->caps &=
++                      ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
++                        NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS);
+               server->fattr_valid = NFS_ATTR_FATTR_V4;
+               if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
+                               res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
+@@ -3951,7 +3952,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+       };
+       int err;
+ 
+-      nfs_server_set_init_caps(server);
+       do {
+               err = nfs4_handle_exception(server,
+                               _nfs4_server_capabilities(server, fhandle),
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index b2bbf3d6d177e8..cd78b7ecbd4325 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1131,6 +1131,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+       };
+       struct inode *inode;
+       __be32 status = nfs_ok;
++      bool save_no_wcc;
+       int err;
+ 
+       if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+@@ -1156,8 +1157,11 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+       if (status)
+               goto out;
++      save_no_wcc = cstate->current_fh.fh_no_wcc;
++      cstate->current_fh.fh_no_wcc = true;
+       status = nfsd_setattr(rqstp, &cstate->current_fh, &attrs,
+                               0, (time64_t)0);
++      cstate->current_fh.fh_no_wcc = save_no_wcc;
+       if (!status)
+               status = nfserrno(attrs.na_labelerr);
+       if (!status)
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index b3e51d88faff46..5ee7149ceaa5a7 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -480,7 +480,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+       int             accmode = NFSD_MAY_SATTR;
+       umode_t         ftype = 0;
+       __be32          err;
+-      int             host_err;
++      int             host_err = 0;
+       bool            get_write_count;
+       bool            size_change = (iap->ia_valid & ATTR_SIZE);
+       int             retries;
+@@ -538,6 +538,9 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+       }
+ 
+       inode_lock(inode);
++      err = fh_fill_pre_attrs(fhp);
++      if (err)
++              goto out_unlock;
+       for (retries = 1;;) {
+               struct iattr attrs;
+ 
+@@ -565,13 +568,15 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
+               attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
+                                               dentry, ACL_TYPE_DEFAULT,
+                                               attr->na_dpacl);
++      fh_fill_post_attrs(fhp);
++out_unlock:
+       inode_unlock(inode);
+       if (size_change)
+               put_write_access(inode);
+ out:
+       if (!host_err)
+               host_err = commit_metadata(fhp);
+-      return nfserrno(host_err);
++      return err != 0 ? err : nfserrno(host_err);
+ }
+ 
+ #if defined(CONFIG_NFSD_V4)
+@@ -1965,11 +1970,9 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+                       err = nfserr_file_open;
+               else
+                       err = nfserr_acces;
+-      } else {
+-              err = nfserrno(host_err);
+       }
+ out:
+-      return err;
++      return err != nfs_ok ? err : nfserrno(host_err);
+ out_unlock:
+       inode_unlock(dirp);
+       goto out_drop_write;
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index f7672472fa8279..5e86c7e2c82125 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -696,6 +696,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
+  * it not only handles the fiemap for inlined files, but also deals
+  * with the fast symlink, cause they have no difference for extent
+  * mapping per se.
++ *
++ * Must be called with ip_alloc_sem semaphore held.
+  */
+ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+                              struct fiemap_extent_info *fieinfo,
+@@ -707,6 +709,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+       u64 phys;
+       u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
+       struct ocfs2_inode_info *oi = OCFS2_I(inode);
++      lockdep_assert_held_read(&oi->ip_alloc_sem);
+ 
+       di = (struct ocfs2_dinode *)di_bh->b_data;
+       if (ocfs2_inode_is_fast_symlink(inode))
+@@ -722,8 +725,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+                       phys += offsetof(struct ocfs2_dinode,
+                                        id2.i_data.id_data);
+ 
++              /* Release the ip_alloc_sem to prevent deadlock on page fault */
++              up_read(&OCFS2_I(inode)->ip_alloc_sem);
+               ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
+                                             flags);
++              down_read(&OCFS2_I(inode)->ip_alloc_sem);
+               if (ret < 0)
+                       return ret;
+       }
+@@ -792,9 +798,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+               len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
+               phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
+               virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
+-
++              /* Release the ip_alloc_sem to prevent deadlock on page fault */
++              up_read(&OCFS2_I(inode)->ip_alloc_sem);
+               ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
+                                             len_bytes, fe_flags);
++              down_read(&OCFS2_I(inode)->ip_alloc_sem);
+               if (ret)
+                       break;
+ 
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index db3f2c6abc162a..4cadd2fd23d8f8 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -388,7 +388,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+       if (proc_alloc_inum(&dp->low_ino))
+               goto out_free_entry;
+ 
+-      pde_set_flags(dp);
++      if (!S_ISDIR(dp->mode))
++              pde_set_flags(dp);
+ 
+       write_lock(&proc_subdir_lock);
+       dp->parent = dir;
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 7a2b81fbd9cfd2..1058066913dd60 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -2884,17 +2884,21 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
+       rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
+       if (rc) {
+               cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc);
++              folio_unlock(folio);
+               goto err_xid;
+       }
+ 
+       rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
+                                          &wsize, credits);
+-      if (rc != 0)
++      if (rc != 0) {
++              folio_unlock(folio);
+               goto err_close;
++      }
+ 
+       wdata = cifs_writedata_alloc(cifs_writev_complete);
+       if (!wdata) {
+               rc = -ENOMEM;
++              folio_unlock(folio);
+               goto err_uncredit;
+       }
+ 
+@@ -3041,17 +3045,22 @@ static ssize_t cifs_writepages_begin(struct address_space *mapping,
+ lock_again:
+       if (wbc->sync_mode != WB_SYNC_NONE) {
+               ret = folio_lock_killable(folio);
+-              if (ret < 0)
++              if (ret < 0) {
++                      folio_put(folio);
+                       return ret;
++              }
+       } else {
+-              if (!folio_trylock(folio))
++              if (!folio_trylock(folio)) {
++                      folio_put(folio);
+                       goto search_again;
++              }
+       }
+ 
+       if (folio->mapping != mapping ||
+           !folio_test_dirty(folio)) {
+               start += folio_size(folio);
+               folio_unlock(folio);
++              folio_put(folio);
+               goto search_again;
+       }
+ 
+@@ -3081,6 +3090,7 @@ static ssize_t cifs_writepages_begin(struct address_space *mapping,
+ out:
+       if (ret > 0)
+               *_start = start + ret;
++      folio_put(folio);
+       return ret;
+ }
+ 
+diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
+index 29ba91fc54076c..45421269ddd881 100644
+--- a/fs/smb/server/connection.h
++++ b/fs/smb/server/connection.h
+@@ -27,6 +27,7 @@ enum {
+       KSMBD_SESS_EXITING,
+       KSMBD_SESS_NEED_RECONNECT,
+       KSMBD_SESS_NEED_NEGOTIATE,
++      KSMBD_SESS_NEED_SETUP,
+       KSMBD_SESS_RELEASING
+ };
+ 
+@@ -195,6 +196,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+       return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+ 
++static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn)
++{
++      return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP;
++}
++
+ static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+       return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+@@ -225,6 +231,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+       WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
+ 
++static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn)
++{
++      WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP);
++}
++
+ static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
+ {
+       WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
+diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
+index 82dcc86a32c57a..408f47220c07b7 100644
+--- a/fs/smb/server/mgmt/user_session.c
++++ b/fs/smb/server/mgmt/user_session.c
+@@ -373,12 +373,12 @@ void destroy_previous_session(struct ksmbd_conn *conn,
+       ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
+       err = ksmbd_conn_wait_idle_sess_id(conn, id);
+       if (err) {
+-              ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++              ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
+               goto out;
+       }
+       ksmbd_destroy_file_table(&prev_sess->file_table);
+       prev_sess->state = SMB2_SESSION_EXPIRED;
+-      ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
++      ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
+ out:
+       up_write(&conn->session_lock);
+       up_write(&sessions_table_lock);
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 85e7bc3a2bd33c..ae47450dc40f82 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1252,7 +1252,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+       }
+ 
+       conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-      ksmbd_conn_set_need_negotiate(conn);
++      ksmbd_conn_set_need_setup(conn);
+ 
+ err_out:
+       if (rc)
+@@ -1273,6 +1273,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess,
+       if (sess->Preauth_HashValue)
+               return 0;
+ 
++      if (!conn->preauth_info)
++              return -ENOMEM;
++
+       sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
+                                         PREAUTH_HASHVALUE_SIZE, GFP_KERNEL);
+       if (!sess->Preauth_HashValue)
+@@ -1688,6 +1691,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 
+       ksmbd_debug(SMB, "Received request for session setup\n");
+ 
++      if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) {
++              work->send_no_response = 1;
++              return rc;
++      }
++
+       WORK_BUFFERS(work, req, rsp);
+ 
+       rsp->StructureSize = cpu_to_le16(9);
+@@ -1919,7 +1927,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+                       if (try_delay) {
+                               ksmbd_conn_set_need_reconnect(conn);
+                               ssleep(5);
+-                              ksmbd_conn_set_need_negotiate(conn);
++                              ksmbd_conn_set_need_setup(conn);
+                       }
+               }
+               smb2_set_err_rsp(work);
+@@ -2249,7 +2257,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+               ksmbd_free_user(sess->user);
+               sess->user = NULL;
+       }
+-      ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
++      ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
+ 
+       rsp->StructureSize = cpu_to_le16(4);
+       err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 9b673fefcef8a6..f9de53fff3acc4 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -23,23 +23,42 @@
+ #define KASAN_ABI_VERSION 5
+ 
+ /*
++ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
++ * dropping __has_feature support for sanitizers:
++ * 
https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
++ * Create these macros for older versions of clang so that it is easy to clean
++ * up once the minimum supported version of LLVM for building the kernel 
always
++ * creates these macros.
++ *
+  * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+  * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+  * to avoid adding redundant attributes in other configurations.
+  */
++#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
++#define __SANITIZE_ADDRESS__
++#endif
++#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
++#define __SANITIZE_HWADDRESS__
++#endif
++#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
++#define __SANITIZE_THREAD__
++#endif
+ 
+-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
++/*
++ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
++ */
++#ifdef __SANITIZE_HWADDRESS__
+ #define __SANITIZE_ADDRESS__
++#endif
++
++#ifdef __SANITIZE_ADDRESS__
+ #define __no_sanitize_address \
+               __attribute__((no_sanitize("address", "hwaddress")))
+ #else
+ #define __no_sanitize_address
+ #endif
+ 
+-#if __has_feature(thread_sanitizer)
+-/* emulate gcc's __SANITIZE_THREAD__ flag */
+-#define __SANITIZE_THREAD__
++#ifdef __SANITIZE_THREAD__
+ #define __no_sanitize_thread \
+               __attribute__((no_sanitize("thread")))
+ #else
+diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h
+new file mode 100644
+index 00000000000000..9174fa59bbc54d
+--- /dev/null
++++ b/include/linux/pgalloc.h
+@@ -0,0 +1,29 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_PGALLOC_H
++#define _LINUX_PGALLOC_H
++
++#include <linux/pgtable.h>
++#include <asm/pgalloc.h>
++
++/*
++ * {pgd,p4d}_populate_kernel() are defined as macros to allow
++ * compile-time optimization based on the configured page table levels.
++ * Without this, linking may fail because callers (e.g., KASAN) may rely
++ * on calls to these functions being optimized away when passing symbols
++ * that exist only for certain page table levels.
++ */
++#define pgd_populate_kernel(addr, pgd, p4d)                           \
++      do {                                                            \
++              pgd_populate(&init_mm, pgd, p4d);                       \
++              if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED)     \
++                      arch_sync_kernel_mappings(addr, addr);          \
++      } while (0)
++
++#define p4d_populate_kernel(addr, p4d, pud)                           \
++      do {                                                            \
++              p4d_populate(&init_mm, p4d, pud);                       \
++              if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED)     \
++                      arch_sync_kernel_mappings(addr, addr);          \
++      } while (0)
++
++#endif /* _LINUX_PGALLOC_H */
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index e42388b6998b17..78a518129e8f06 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1467,8 +1467,8 @@ static inline int pmd_protnone(pmd_t pmd)
+ 
+ /*
+  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
+- * needs to be called.
++ * and let generic vmalloc, ioremap and page table update code know when
++ * arch_sync_kernel_mappings() needs to be called.
+  */
+ #ifndef ARCH_PAGE_TABLE_SYNC_MASK
+ #define ARCH_PAGE_TABLE_SYNC_MASK 0
+@@ -1601,10 +1601,11 @@ static inline bool arch_has_pfn_modify_check(void)
+ /*
+  * Page Table Modification bits for pgtbl_mod_mask.
+  *
+- * These are used by the p?d_alloc_track*() set of functions an in the generic
+- * vmalloc/ioremap code to track at which page-table levels entries have been
+- * modified. Based on that the code can better decide when vmalloc and ioremap
+- * mapping changes need to be synchronized to other page-tables in the system.
++ * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
++ * functions in the generic vmalloc, ioremap and page table update code
++ * to track at which page-table levels entries have been modified.
++ * Based on that the code can better decide when page table changes need
++ * to be synchronized to other page-tables in the system.
+  */
+ #define               __PGTBL_PGD_MODIFIED    0
+ #define               __PGTBL_P4D_MODIFIED    1
+diff --git a/include/net/sock.h b/include/net/sock.h
+index b5f7208a9ec383..f8e029cc48ccce 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -353,6 +353,8 @@ struct sk_filter;
+   *   @sk_txtime_unused: unused txtime flags
+   *   @ns_tracker: tracker for netns reference
+   *   @sk_bind2_node: bind node in the bhash2 table
++  *   @sk_owner: reference to the real owner of the socket that calls
++  *              sock_lock_init_class_and_name().
+   */
+ struct sock {
+       /*
+@@ -545,6 +547,10 @@ struct sock {
+       struct rcu_head         sk_rcu;
+       netns_tracker           ns_tracker;
+       struct hlist_node       sk_bind2_node;
++
++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
++      struct module           *sk_owner;
++#endif
+ };
+ 
+ enum sk_pacing {
+@@ -1699,6 +1705,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+       sk_mem_reclaim(sk);
+ }
+ 
++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
++static inline void sk_owner_set(struct sock *sk, struct module *owner)
++{
++      __module_get(owner);
++      sk->sk_owner = owner;
++}
++
++static inline void sk_owner_clear(struct sock *sk)
++{
++      sk->sk_owner = NULL;
++}
++
++static inline void sk_owner_put(struct sock *sk)
++{
++      module_put(sk->sk_owner);
++}
++#else
++static inline void sk_owner_set(struct sock *sk, struct module *owner)
++{
++}
++
++static inline void sk_owner_clear(struct sock *sk)
++{
++}
++
++static inline void sk_owner_put(struct sock *sk)
++{
++}
++#endif
+ /*
+  * Macro so as to not evaluate some arguments when
+  * lockdep is not enabled.
+@@ -1708,13 +1743,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+  */
+ #define sock_lock_init_class_and_name(sk, sname, skey, name, key)     \
+ do {                                                                  \
++      sk_owner_set(sk, THIS_MODULE);                                  \
+       sk->sk_lock.owned = 0;                                          \
+       init_waitqueue_head(&sk->sk_lock.wq);                           \
+       spin_lock_init(&(sk)->sk_lock.slock);                           \
+       debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
+-                      sizeof((sk)->sk_lock));                         \
++                                 sizeof((sk)->sk_lock));              \
+       lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
+-                              (skey), (sname));                               \
++                                 (skey), (sname));                    \
+       lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
+ } while (0)
+ 
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 4b20a72ab8cffe..90c281e1379eeb 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1204,8 +1204,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
+               goto out;
+       }
+ 
+-      /* allocate hrtimer via map_kmalloc to use memcg accounting */
+-      cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
++      /* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
++       * kmalloc_nolock() is available, avoid locking issues by using
++       * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
++       */
++      cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
+       if (!cb) {
+               ret = -ENOMEM;
+               goto out;
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 3fcd9f92d38612..9189ccd4fee472 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -150,8 +150,6 @@ static struct rcu_tasks rt_name =                                                  \
+ }
+ 
+ #ifdef CONFIG_TASKS_RCU
+-/* Track exiting tasks in order to allow them to be waited for. */
+-DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
+ 
+ /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
+ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
+@@ -879,10 +877,12 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
+ //    number of voluntary context switches, and add that task to the
+ //    holdout list.
+ // rcu_tasks_postscan():
+-//    Invoke synchronize_srcu() to ensure that all tasks that were
+-//    in the process of exiting (and which thus might not know to
+-//    synchronize with this RCU Tasks grace period) have completed
+-//    exiting.
++//    Gather per-CPU lists of tasks in do_exit() to ensure that all
++//    tasks that were in the process of exiting (and which thus might
++//    not know to synchronize with this RCU Tasks grace period) have
++//    completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
++//    will take care of any tasks stuck in the non-preemptible region
++//    of do_exit() following its call to exit_tasks_rcu_stop().
+ // check_all_holdout_tasks(), repeatedly until holdout list is empty:
+ //    Scans the holdout list, attempting to identify a quiescent state
+ //    for each task on the list.  If there is a quiescent state, the
+@@ -895,8 +895,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
+ //    with interrupts disabled.
+ //
+ // For each exiting task, the exit_tasks_rcu_start() and
+-// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
+-// read-side critical sections waited for by rcu_tasks_postscan().
++// exit_tasks_rcu_finish() functions add and remove, respectively, the
++// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
++// wait on.  This is necessary because rcu_tasks_postscan() must wait on
++// tasks that have already been removed from the global list of tasks.
+ //
+ // Pre-grace-period update-side code is ordered before the grace
+ // via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
+@@ -960,9 +962,13 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
+       }
+ }
+ 
++void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
++DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
++
+ /* Processing between scanning taskslist and draining the holdout list. */
+ static void rcu_tasks_postscan(struct list_head *hop)
+ {
++      int cpu;
+       int rtsi = READ_ONCE(rcu_task_stall_info);
+ 
+       if (!IS_ENABLED(CONFIG_TINY_RCU)) {
+@@ -976,9 +982,9 @@ static void rcu_tasks_postscan(struct list_head *hop)
+        * this, divide the fragile exit path part in two intersecting
+        * read side critical sections:
+        *
+-       * 1) An _SRCU_ read side starting before calling exit_notify(),
+-       *    which may remove the task from the tasklist, and ending after
+-       *    the final preempt_disable() call in do_exit().
++       * 1) A task_struct list addition before calling exit_notify(),
++       *    which may remove the task from the tasklist, with the
++       *    removal after the final preempt_disable() call in do_exit().
+        *
+        * 2) An _RCU_ read side starting with the final preempt_disable()
+        *    call in do_exit() and ending with the final call to schedule()
+@@ -987,7 +993,37 @@ static void rcu_tasks_postscan(struct list_head *hop)
+        * This handles the part 1). And postgp will handle part 2) with a
+        * call to synchronize_rcu().
+        */
+-      synchronize_srcu(&tasks_rcu_exit_srcu);
++
++      for_each_possible_cpu(cpu) {
++              unsigned long j = jiffies + 1;
++              struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
++              struct task_struct *t;
++              struct task_struct *t1;
++              struct list_head tmp;
++
++              raw_spin_lock_irq_rcu_node(rtpcp);
++              list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
++                      if (list_empty(&t->rcu_tasks_holdout_list))
++                              rcu_tasks_pertask(t, hop);
++
++                      // RT kernels need frequent pauses, otherwise
++                      // pause at least once per pair of jiffies.
++                      if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
++                              continue;
++
++                      // Keep our place in the list while pausing.
++                      // Nothing else traverses this list, so adding a
++                      // bare list_head is OK.
++                      list_add(&tmp, &t->rcu_tasks_exit_list);
++                      raw_spin_unlock_irq_rcu_node(rtpcp);
++                      cond_resched(); // For CONFIG_PREEMPT=n kernels
++                      raw_spin_lock_irq_rcu_node(rtpcp);
++                      t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
++                      list_del(&tmp);
++                      j = jiffies + 1;
++              }
++              raw_spin_unlock_irq_rcu_node(rtpcp);
++      }
+ 
+       if (!IS_ENABLED(CONFIG_TINY_RCU))
+               del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
+@@ -1055,7 +1091,6 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp)
+        *
+        * In addition, this synchronize_rcu() waits for exiting tasks
+        * to complete their final preempt_disable() region of execution,
+-       * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
+        * enforcing the whole region before tasklist removal until
+        * the final schedule() with TASK_DEAD state to be an RCU TASKS
+        * read side critical section.
+@@ -1063,9 +1098,6 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp)
+       synchronize_rcu();
+ }
+ 
+-void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
+-DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
+-
+ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
+ {
+ #ifndef CONFIG_TINY_RCU
+@@ -1175,25 +1207,48 @@ struct task_struct *get_rcu_tasks_gp_kthread(void)
+ EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
+ 
+ /*
+- * Contribute to protect against tasklist scan blind spot while the
+- * task is exiting and may be removed from the tasklist. See
+- * corresponding synchronize_srcu() for further details.
++ * Protect against tasklist scan blind spot while the task is exiting and
++ * may be removed from the tasklist.  Do this by adding the task to yet
++ * another list.
++ *
++ * Note that the task will remove itself from this list, so there is no
++ * need for get_task_struct(), except in the case where rcu_tasks_pertask()
++ * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
++ * the needed get_task_struct().
+  */
+-void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
++void exit_tasks_rcu_start(void)
+ {
+-      current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
++      unsigned long flags;
++      struct rcu_tasks_percpu *rtpcp;
++      struct task_struct *t = current;
++
++      WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
++      preempt_disable();
++      rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
++      t->rcu_tasks_exit_cpu = smp_processor_id();
++      raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
++      if (!rtpcp->rtp_exit_list.next)
++              INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
++      list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
++      raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
++      preempt_enable();
+ }
+ 
+ /*
+- * Contribute to protect against tasklist scan blind spot while the
+- * task is exiting and may be removed from the tasklist. See
+- * corresponding synchronize_srcu() for further details.
++ * Remove the task from the "yet another list" because do_exit() is now
++ * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
+  */
+-void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
++void exit_tasks_rcu_stop(void)
+ {
++      unsigned long flags;
++      struct rcu_tasks_percpu *rtpcp;
+       struct task_struct *t = current;
+ 
+-      __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
++      WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
++      rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
++      raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
++      list_del_init(&t->rcu_tasks_exit_list);
++      raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+ }
+ 
+ /*
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 6d9da768604d68..ccea52adcba672 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -671,17 +671,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ /*
+  * Is the high resolution mode active ?
+  */
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
++static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+ {
+       return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+               cpu_base->hres_active : 0;
+ }
+ 
+-static inline int hrtimer_hres_active(void)
+-{
+-      return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+-}
+-
+ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+                               struct hrtimer *next_timer,
+                               ktime_t expires_next)
+@@ -705,7 +700,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+-      if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
++      if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+               return;
+ 
+       tick_program_event(expires_next, 1);
+@@ -813,13 +808,13 @@ static void retrigger_next_event(void *arg)
+        * of the next expiring timer is enough. The return from the SMP
+        * function call will take care of the reprogramming in case the
+        * CPU was in a NOHZ idle sleep.
++       *
++       * In periodic low resolution mode, the next softirq expiration
++       * must also be updated.
+        */
+-      if (!__hrtimer_hres_active(base) && !tick_nohz_active)
+-              return;
+-
+       raw_spin_lock(&base->lock);
+       hrtimer_update_base(base);
+-      if (__hrtimer_hres_active(base))
++      if (hrtimer_hres_active(base))
+               hrtimer_force_reprogram(base, 0);
+       else
+               hrtimer_update_next_event(base);
+@@ -976,7 +971,7 @@ void clock_was_set(unsigned int bases)
+       cpumask_var_t mask;
+       int cpu;
+ 
+-      if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
++      if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+               goto out_timerfd;
+ 
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -1554,7 +1549,7 @@ u64 hrtimer_get_next_event(void)
+ 
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ 
+-      if (!__hrtimer_hres_active(cpu_base))
++      if (!hrtimer_hres_active(cpu_base))
+               expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+ 
+       raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+@@ -1577,7 +1572,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+ 
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ 
+-      if (__hrtimer_hres_active(cpu_base)) {
++      if (hrtimer_hres_active(cpu_base)) {
+               unsigned int active;
+ 
+               if (!cpu_base->softirq_activated) {
+@@ -1938,25 +1933,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+       tick_program_event(expires_next, 1);
+       pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
+ }
+-
+-/* called with interrupts disabled */
+-static inline void __hrtimer_peek_ahead_timers(void)
+-{
+-      struct tick_device *td;
+-
+-      if (!hrtimer_hres_active())
+-              return;
+-
+-      td = this_cpu_ptr(&tick_cpu_device);
+-      if (td && td->evtdev)
+-              hrtimer_interrupt(td->evtdev);
+-}
+-
+-#else /* CONFIG_HIGH_RES_TIMERS */
+-
+-static inline void __hrtimer_peek_ahead_timers(void) { }
+-
+-#endif        /* !CONFIG_HIGH_RES_TIMERS */
++#endif /* !CONFIG_HIGH_RES_TIMERS */
+ 
+ /*
+  * Called from run_local_timers in hardirq context every jiffy
+@@ -1967,7 +1944,7 @@ void hrtimer_run_queues(void)
+       unsigned long flags;
+       ktime_t now;
+ 
+-      if (__hrtimer_hres_active(cpu_base))
++      if (hrtimer_hres_active(cpu_base))
+               return;
+ 
+       /*
+@@ -2312,11 +2289,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+                                    &new_base->clock_base[i]);
+       }
+ 
+-      /*
+-       * The migration might have changed the first expiring softirq
+-       * timer on this CPU. Update it.
+-       */
+-      __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+       /* Tell the other CPU to retrigger the next event */
+       smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index a32c8637503d14..a111be83c36939 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -750,7 +750,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+               /* copy the current bits to the new max */
+               ret = trace_pid_list_first(filtered_pids, &pid);
+               while (!ret) {
+-                      trace_pid_list_set(pid_list, pid);
++                      ret = trace_pid_list_set(pid_list, pid);
++                      if (ret < 0)
++                              goto out;
++
+                       ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
+                       nr_pids++;
+               }
+@@ -787,6 +790,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+               trace_parser_clear(&parser);
+               ret = 0;
+       }
++ out:
+       trace_parser_put(&parser);
+ 
+       if (ret < 0) {
+@@ -7226,7 +7230,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+       entry = ring_buffer_event_data(event);
+       entry->ip = _THIS_IP_;
+ 
+-      len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
++      len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
+       if (len) {
+               memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
+               cnt = FAULTED_SIZE;
+@@ -7301,7 +7305,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+ 
+       entry = ring_buffer_event_data(event);
+ 
+-      len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
++      len = copy_from_user_nofault(&entry->id, ubuf, cnt);
+       if (len) {
+               entry->id = -1;
+               memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
+diff --git a/mm/Kconfig b/mm/Kconfig
+index c11cd01169e8d1..046c32686fc4d8 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -280,7 +280,7 @@ config SLAB
+ 
+ config SLUB_TINY
+       bool "Configure SLUB for minimal memory footprint"
+-      depends on SLUB && EXPERT
++      depends on SLUB && EXPERT && !COMPILE_TEST
+       select SLAB_MERGE_DEFAULT
+       help
+          Configures the SLUB allocator in a way to achieve minimal memory
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 43e4fe7ef17eb4..48747236c21ca8 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1043,6 +1043,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
+       if (!quota->ms && !quota->sz)
+               return;
+ 
++      /* First charge window */
++      if (!quota->total_charged_sz && !quota->charged_from)
++              quota->charged_from = jiffies;
++
+       /* New charge window starts */
+       if (time_after_eq(jiffies, quota->charged_from +
+                               msecs_to_jiffies(quota->reset_interval))) {
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 3de2916a65c38c..b4032538b22cf7 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -203,6 +203,9 @@ static int damon_lru_sort_apply_parameters(void)
+       unsigned int hot_thres, cold_thres;
+       int err = 0;
+ 
++      if (!damon_lru_sort_mon_attrs.sample_interval)
++              return -EINVAL;
++
+       err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+       if (err)
+               return err;
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index 66e190f0374ac8..586daa2cefe4fe 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -167,6 +167,9 @@ static int damon_reclaim_apply_parameters(void)
+       struct damos_filter *filter;
+       int err = 0;
+ 
++      if (!damon_reclaim_mon_attrs.aggr_interval)
++              return -EINVAL;
++
+       err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+       if (err)
+               return err;
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index b317f51dcc9876..91893543d47ca3 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -1055,14 +1055,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ {
+       struct damon_sysfs_kdamond *kdamond = container_of(kobj,
+                       struct damon_sysfs_kdamond, kobj);
+-      struct damon_ctx *ctx = kdamond->damon_ctx;
+-      bool running;
++      struct damon_ctx *ctx;
++      bool running = false;
+ 
+-      if (!ctx)
+-              running = false;
+-      else
++      if (!mutex_trylock(&damon_sysfs_lock))
++              return -EBUSY;
++
++      ctx = kdamond->damon_ctx;
++      if (ctx)
+               running = damon_sysfs_ctx_running(ctx);
+ 
++      mutex_unlock(&damon_sysfs_lock);
++
+       return sysfs_emit(buf, "%s\n", running ?
+                       damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
+                       damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
+diff --git a/mm/kasan/init.c b/mm/kasan/init.c
+index 89895f38f72242..afecc04b486a63 100644
+--- a/mm/kasan/init.c
++++ b/mm/kasan/init.c
+@@ -13,9 +13,9 @@
+ #include <linux/mm.h>
+ #include <linux/pfn.h>
+ #include <linux/slab.h>
++#include <linux/pgalloc.h>
+ 
+ #include <asm/page.h>
+-#include <asm/pgalloc.h>
+ 
+ #include "kasan.h"
+ 
+@@ -197,7 +197,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+                       pud_t *pud;
+                       pmd_t *pmd;
+ 
+-                      p4d_populate(&init_mm, p4d,
++                      p4d_populate_kernel(addr, p4d,
+                                       lm_alias(kasan_early_shadow_pud));
+                       pud = pud_offset(p4d, addr);
+                       pud_populate(&init_mm, pud,
+@@ -218,7 +218,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+                       } else {
+                               p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                               pud_init(p);
+-                              p4d_populate(&init_mm, p4d, p);
++                              p4d_populate_kernel(addr, p4d, p);
+                       }
+               }
+               zero_pud_populate(p4d, addr, next);
+@@ -257,10 +257,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
+                        * puds,pmds, so pgd_populate(), pud_populate()
+                        * is noops.
+                        */
+-                      pgd_populate(&init_mm, pgd,
++                      pgd_populate_kernel(addr, pgd,
+                                       lm_alias(kasan_early_shadow_p4d));
+                       p4d = p4d_offset(pgd, addr);
+-                      p4d_populate(&init_mm, p4d,
++                      p4d_populate_kernel(addr, p4d,
+                                       lm_alias(kasan_early_shadow_pud));
+                       pud = pud_offset(p4d, addr);
+                       pud_populate(&init_mm, pud,
+@@ -279,7 +279,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
+                               if (!p)
+                                       return -ENOMEM;
+                       } else {
+-                              pgd_populate(&init_mm, pgd,
++                              pgd_populate_kernel(addr, pgd,
+                                       early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                       }
+               }
+diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
+index ecf9f5aa352005..9ca21d1e10ac19 100644
+--- a/mm/kasan/kasan_test.c
++++ b/mm/kasan/kasan_test.c
+@@ -1053,6 +1053,7 @@ static void kasan_strings(struct kunit *test)
+ 
+       ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
++      OPTIMIZER_HIDE_VAR(ptr);
+ 
+       kfree(ptr);
+ 
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index f227b39ae4cf74..16d29ee602c70a 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1240,6 +1240,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+       int result = SCAN_FAIL, referenced = 0;
+       int none_or_zero = 0, shared = 0;
+       struct page *page = NULL;
++      struct folio *folio = NULL;
+       unsigned long _address;
+       spinlock_t *ptl;
+       int node = NUMA_NO_NODE, unmapped = 0;
+@@ -1326,29 +1327,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+                       }
+               }
+ 
+-              page = compound_head(page);
+-
++              folio = page_folio(page);
+               /*
+                * Record which node the original page is from and save this
+                * information to cc->node_load[].
+                * Khugepaged will allocate hugepage from the node has the max
+                * hit record.
+                */
+-              node = page_to_nid(page);
++              node = folio_nid(folio);
+               if (hpage_collapse_scan_abort(node, cc)) {
+                       result = SCAN_SCAN_ABORT;
+                       goto out_unmap;
+               }
+               cc->node_load[node]++;
+-              if (!PageLRU(page)) {
++              if (!folio_test_lru(folio)) {
+                       result = SCAN_PAGE_LRU;
+                       goto out_unmap;
+               }
+-              if (PageLocked(page)) {
++              if (folio_test_locked(folio)) {
+                       result = SCAN_PAGE_LOCK;
+                       goto out_unmap;
+               }
+-              if (!PageAnon(page)) {
++              if (!folio_test_anon(folio)) {
+                       result = SCAN_PAGE_ANON;
+                       goto out_unmap;
+               }
+@@ -1363,7 +1363,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+                * has excessive GUP pins (i.e. 512).  Anyway the same check
+                * will be done again later the risk seems low.
+                */
+-              if (!is_refcount_suitable(page)) {
++              if (!is_refcount_suitable(&folio->page)) {
+                       result = SCAN_PAGE_COUNT;
+                       goto out_unmap;
+               }
+@@ -1373,9 +1373,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+                * enough young pte to justify collapsing the page
+                */
+               if (cc->is_khugepaged &&
+-                  (pte_young(pteval) || page_is_young(page) ||
+-                   PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+-                                                                   address)))
++                  (pte_young(pteval) || folio_test_young(folio) ||
++                   folio_test_referenced(folio) ||
++                   mmu_notifier_test_young(vma->vm_mm, _address)))
+                       referenced++;
+       }
+       if (!writable) {
+@@ -1396,7 +1396,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+               *mmap_locked = false;
+       }
+ out:
+-      trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
++      trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
+                                    none_or_zero, result, unmapped);
+       return result;
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index dae5e60d64e2fd..8bedcd288a0cc9 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2535,10 +2535,9 @@ int unpoison_memory(unsigned long pfn)
+       static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                       DEFAULT_RATELIMIT_BURST);
+ 
+-      if (!pfn_valid(pfn))
+-              return -ENXIO;
+-
+-      p = pfn_to_page(pfn);
++      p = pfn_to_online_page(pfn);
++      if (!p)
++              return -EIO;
+       folio = page_folio(p);
+ 
+       mutex_lock(&mf_mutex);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index d287cebd58caa3..38d5121c2b652a 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -3157,7 +3157,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
+ #endif /* BUILD_EMBED_FIRST_CHUNK */
+ 
+ #ifdef BUILD_PAGE_FIRST_CHUNK
+-#include <asm/pgalloc.h>
++#include <linux/pgalloc.h>
+ 
+ #ifndef P4D_TABLE_SIZE
+ #define P4D_TABLE_SIZE PAGE_SIZE
+@@ -3185,7 +3185,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
+               p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
+               if (!p4d)
+                       goto err_alloc;
+-              pgd_populate(&init_mm, pgd, p4d);
++              pgd_populate_kernel(addr, pgd, p4d);
+       }
+ 
+       p4d = p4d_offset(pgd, addr);
+@@ -3193,7 +3193,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
+               pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+               if (!pud)
+                       goto err_alloc;
+-              p4d_populate(&init_mm, p4d, pud);
++              p4d_populate_kernel(addr, p4d, pud);
+       }
+ 
+       pud = pud_offset(p4d, addr);
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index a2cbe44c48e10f..589d6a262b6dad 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -27,9 +27,9 @@
+ #include <linux/spinlock.h>
+ #include <linux/vmalloc.h>
+ #include <linux/sched.h>
++#include <linux/pgalloc.h>
+ 
+ #include <asm/dma.h>
+-#include <asm/pgalloc.h>
+ 
+ /*
+  * Allocate a block of memory to be used to back the virtual memory map
+@@ -225,7 +225,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
+               if (!p)
+                       return NULL;
+               pud_init(p);
+-              p4d_populate(&init_mm, p4d, p);
++              p4d_populate_kernel(addr, p4d, p);
+       }
+       return p4d;
+ }
+@@ -237,7 +237,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              pgd_populate(&init_mm, pgd, p);
++              pgd_populate_kernel(addr, pgd, p);
+       }
+       return pgd;
+ }
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index a6e94ceb7c9a08..a45db67197226b 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -312,6 +312,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
+       int err = 0;
+       int opt_id;
+ 
++      opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
++      if (opt_id != BITS_PER_LONG) {
++              NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
++                                     opt_id);
++              return -EINVAL;
++      }
++
+       for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
+               bool on = !!(bm->optval & BIT(opt_id));
+ 
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 48668790160211..e0b966c2517cf1 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+       if (!ecu)
+               ecu = j1939_ecu_create_locked(priv, name);
+       err = PTR_ERR_OR_ZERO(ecu);
+-      if (err)
++      if (err) {
++              if (j1939_address_is_unicast(sa))
++                      priv->ents[sa].nusers--;
+               goto done;
++      }
+ 
+       ecu->nusers++;
+       /* TODO: do we care if ecu->addr != sa? */
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index cf9a12d8da6f90..7bf4d4fb96735a 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+       ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+       if (ret) {
+               j1939_netdev_stop(priv);
++              jsk->priv = NULL;
++              synchronize_rcu();
++              j1939_priv_put(priv);
+               goto out_release_sock;
+       }
+ 
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 3c8b78d9c4d1ce..8add91385375e1 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1524,7 +1524,7 @@ static void con_fault_finish(struct ceph_connection *con)
+        * in case we faulted due to authentication, invalidate our
+        * current tickets so that we can get new ones.
+        */
+-      if (con->v1.auth_retry) {
++      if (!ceph_msgr2(from_msgr(con->msgr)) && con->v1.auth_retry) {
+               dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
+               if (con->ops->invalidate_authorizer)
+                       con->ops->invalidate_authorizer(con);
+@@ -1714,9 +1714,10 @@ static void clear_standby(struct ceph_connection *con)
+ {
+       /* come back from STANDBY? */
+       if (con->state == CEPH_CON_S_STANDBY) {
+-              dout("clear_standby %p and ++connect_seq\n", con);
++              dout("clear_standby %p\n", con);
+               con->state = CEPH_CON_S_PREOPEN;
+-              con->v1.connect_seq++;
++              if (!ceph_msgr2(from_msgr(con->msgr)))
++                      con->v1.connect_seq++;
+               WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
+               WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
+       }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index b74bc8175937e2..9918a9a337b616 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2029,6 +2029,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
+  */
+ static inline void sock_lock_init(struct sock *sk)
+ {
++      sk_owner_clear(sk);
++
+       if (sk->sk_kern_sock)
+               sock_lock_init_class_and_name(
+                       sk,
+@@ -2124,6 +2126,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
+       cgroup_sk_free(&sk->sk_cgrp_data);
+       mem_cgroup_sk_free(sk);
+       security_sk_free(sk);
++
++      sk_owner_put(sk);
++
+       if (slab != NULL)
+               kmem_cache_free(slab, sk);
+       else
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 906c38b9d66ff3..5514b5bedc9298 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -59,7 +59,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+ 
+       ASSERT_RTNL();
+ 
+-      hsr_for_each_port(master->hsr, port) {
++      hsr_for_each_port_rtnl(master->hsr, port) {
+               if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+                       netif_carrier_on(master->dev);
+                       return true;
+@@ -109,7 +109,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+       struct hsr_port *port;
+ 
+       mtu_max = ETH_DATA_LEN;
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               if (port->type != HSR_PT_MASTER)
+                       mtu_max = min(port->dev->mtu, mtu_max);
+ 
+@@ -144,7 +144,7 @@ static int hsr_dev_open(struct net_device *dev)
+       hsr = netdev_priv(dev);
+       designation = '\0';
+ 
+-      hsr_for_each_port(hsr, port) {
++      hsr_for_each_port_rtnl(hsr, port) {
+               if (port->type == HSR_PT_MASTER)
+                       continue;
+               switch (port->type) {
+@@ -170,7 +170,24 @@ static int hsr_dev_open(struct net_device *dev)
+ 
+ static int hsr_dev_close(struct net_device *dev)
+ {
+-      /* Nothing to do here. */
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER)
++                      continue;
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      dev_uc_unsync(port->dev, dev);
++                      dev_mc_unsync(port->dev, dev);
++                      break;
++              default:
++                      break;
++              }
++      }
++
+       return 0;
+ }
+ 
+@@ -190,7 +207,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+        * may become enabled.
+        */
+       features &= ~NETIF_F_ONE_FOR_ALL;
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               features = netdev_increment_features(features,
+                                                    port->dev->features,
+                                                    mask);
+@@ -211,6 +228,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+       struct hsr_priv *hsr = netdev_priv(dev);
+       struct hsr_port *master;
+ 
++      rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       if (master) {
+               skb->dev = master->dev;
+@@ -223,6 +241,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+               dev_core_stats_tx_dropped_inc(dev);
+               dev_kfree_skb_any(skb);
+       }
++      rcu_read_unlock();
++
+       return NETDEV_TX_OK;
+ }
+ 
+@@ -401,12 +421,133 @@ void hsr_del_ports(struct hsr_priv *hsr)
+               hsr_del_port(port);
+ }
+ 
++static void hsr_set_rx_mode(struct net_device *dev)
++{
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER)
++                      continue;
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      dev_mc_sync_multiple(port->dev, dev);
++                      dev_uc_sync_multiple(port->dev, dev);
++                      break;
++              default:
++                      break;
++              }
++      }
++}
++
++static void hsr_change_rx_flags(struct net_device *dev, int change)
++{
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER)
++                      continue;
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      if (change & IFF_ALLMULTI)
++                              dev_set_allmulti(port->dev,
++                                               dev->flags &
++                                               IFF_ALLMULTI ? 1 : -1);
++                      break;
++              default:
++                      break;
++              }
++      }
++}
++
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++                                 __be16 proto, u16 vid)
++{
++      bool is_slave_a_added = false;
++      bool is_slave_b_added = false;
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++      int ret = 0;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER ||
++                  port->type == HSR_PT_INTERLINK)
++                      continue;
++
++              ret = vlan_vid_add(port->dev, proto, vid);
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++                      if (ret) {
++                              /* clean up Slave-B */
++                              netdev_err(dev, "add vid failed for Slave-A\n");
++                              if (is_slave_b_added)
++                                      vlan_vid_del(port->dev, proto, vid);
++                              return ret;
++                      }
++
++                      is_slave_a_added = true;
++                      break;
++
++              case HSR_PT_SLAVE_B:
++                      if (ret) {
++                              /* clean up Slave-A */
++                              netdev_err(dev, "add vid failed for Slave-B\n");
++                              if (is_slave_a_added)
++                                      vlan_vid_del(port->dev, proto, vid);
++                              return ret;
++                      }
++
++                      is_slave_b_added = true;
++                      break;
++              default:
++                      break;
++              }
++      }
++
++      return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++                                  __be16 proto, u16 vid)
++{
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      vlan_vid_del(port->dev, proto, vid);
++                      break;
++              default:
++                      break;
++              }
++      }
++
++      return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+       .ndo_change_mtu = hsr_dev_change_mtu,
+       .ndo_open = hsr_dev_open,
+       .ndo_stop = hsr_dev_close,
+       .ndo_start_xmit = hsr_dev_xmit,
++      .ndo_change_rx_flags = hsr_change_rx_flags,
+       .ndo_fix_features = hsr_fix_features,
++      .ndo_set_rx_mode = hsr_set_rx_mode,
++      .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++      .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+ 
+ static struct device_type hsr_type = {
+@@ -447,7 +588,8 @@ void hsr_dev_setup(struct net_device *dev)
+ 
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+-                         NETIF_F_HW_VLAN_CTAG_TX;
++                         NETIF_F_HW_VLAN_CTAG_TX |
++                         NETIF_F_HW_VLAN_CTAG_FILTER;
+ 
+       dev->features = dev->hw_features;
+ 
+@@ -533,6 +675,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+           (slave[1]->features & NETIF_F_HW_HSR_FWD))
+               hsr->fwd_offloaded = true;
+ 
++      if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++          (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++              hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+       res = register_netdevice(hsr_dev);
+       if (res)
+               goto err_unregister;
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 257b50124cee5e..76a1958609e291 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+       struct hsr_port *port;
+ 
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               if (port->type != HSR_PT_MASTER)
+                       return false;
+       return true;
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+       struct hsr_port *port;
+ 
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               if (port->type == pt)
+                       return port;
+       return NULL;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 18e01791ad799d..2fcabe39e61f4f 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -221,6 +221,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+       list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+ 
++#define hsr_for_each_port_rtnl(hsr, port) \
++      list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+ 
+ /* Caller must ensure skb is a valid HSR frame */
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index deb08cab44640d..75e3d7501752df 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+               return -EINVAL;
+ 
++      if (skb_is_gso(skb))
++              skb_gso_reset(skb);
++
+       skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+       pskb_pull(skb, ETH_HLEN);
+       skb_reset_network_header(skb);
+@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+               return -EINVAL;
+ 
++      if (skb_is_gso(skb))
++              skb_gso_reset(skb);
++
+       skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+       pskb_pull(skb, ETH_HLEN);
+       skb_reset_network_header(skb);
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 5312237e804093..7518d2af630880 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -408,8 +408,11 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+               if (!psock->cork) {
+                       psock->cork = kzalloc(sizeof(*psock->cork),
+                                             GFP_ATOMIC | __GFP_NOWARN);
+-                      if (!psock->cork)
++                      if (!psock->cork) {
++                              sk_msg_free(sk, msg);
++                              *copied = 0;
+                               return -ENOMEM;
++                      }
+               }
+               memcpy(psock->cork, msg, sizeof(*msg));
+               return 0;
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 31f6899ef71aac..b31ba9f905e30c 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -1471,13 +1471,12 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+ {
+       static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
+       struct sock *sk = (struct sock *)msk;
++      bool keep_open;
+ 
+-      if (ssk->sk_prot->keepalive) {
+-              if (sock_flag(sk, SOCK_KEEPOPEN))
+-                      ssk->sk_prot->keepalive(ssk, 1);
+-              else
+-                      ssk->sk_prot->keepalive(ssk, 0);
+-      }
++      keep_open = sock_flag(sk, SOCK_KEEPOPEN);
++      if (ssk->sk_prot->keepalive)
++              ssk->sk_prot->keepalive(ssk, keep_open);
++      sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
+ 
+       ssk->sk_priority = sk->sk_priority;
+       ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 73bc39281ef5f5..9b45fbdc90cabe 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
+ 
+ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
+-      if (unlikely(current->flags & PF_EXITING))
+-              return -EINTR;
+       schedule();
+       if (signal_pending_state(mode, current))
+               return -ERESTARTSYS;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 8b27a21f3b42d8..3660ef26471129 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -407,9 +407,9 @@ xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
+       iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
+                     alert_kvec.iov_len);
+       ret = sock_recvmsg(sock, &msg, flags);
+-      if (ret > 0 &&
+-          tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
+-              iov_iter_revert(&msg.msg_iter, ret);
++      if (ret > 0) {
++              if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT)
++                      iov_iter_revert(&msg.msg_iter, ret);
+               ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
+                                          -EAGAIN);
+       }
+diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
+index e2a6a69352dfb7..b40f85e3806fcb 100644
+--- a/samples/ftrace/ftrace-direct-modify.c
++++ b/samples/ftrace/ftrace-direct-modify.c
+@@ -40,8 +40,8 @@ asm (
+       CALL_DEPTH_ACCOUNT
+ "     call my_direct_func1\n"
+ "     leave\n"
+-"     .size           my_tramp1, .-my_tramp1\n"
+       ASM_RET
++"     .size           my_tramp1, .-my_tramp1\n"
+ 
+ "     .type           my_tramp2, @function\n"
+ "     .globl          my_tramp2\n"
+diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
+index 390658a2d5b746..a57c24c129720f 100644
+--- a/scripts/Makefile.kasan
++++ b/scripts/Makefile.kasan
+@@ -68,10 +68,14 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
+               $(call cc-param,hwasan-inline-all-checks=0) \
+               $(instrumentation_flags)
+ 
+-# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
+-ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y)
+-CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
+-endif
++# Instrument memcpy/memset/memmove calls by using instrumented __(hw)asan_mem*().
++ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
++      ifdef CONFIG_CC_IS_GCC
++              CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
++      else
++              CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
++      endif
++endif # CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+ 
+ endif # CONFIG_KASAN_SW_TAGS
+ 
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 068edb0d79f736..3b734a4dfcbe4e 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -128,16 +128,22 @@ static void ima_rdwr_violation_check(struct file *file,
+               if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+                       if (!iint)
+                               iint = integrity_iint_find(inode);
++
+                       /* IMA_MEASURE is set from reader side */
+-                      if (iint && test_bit(IMA_MUST_MEASURE,
+-                                              &iint->atomic_flags))
++                      if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU,
++                                                     &iint->atomic_flags))
+                               send_tomtou = true;
+               }
+       } else {
+               if (must_measure)
+-                      set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
+-              if (inode_is_open_for_write(inode) && must_measure)
+-                      send_writers = true;
++                      set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags);
++
++              /* Limit number of open_writers violations */
++              if (inode_is_open_for_write(inode) && must_measure) {
++                      if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS,
++                                            &iint->atomic_flags))
++                              send_writers = true;
++              }
+       }
+ 
+       if (!send_tomtou && !send_writers)
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index ad20ff7f5dfaa4..a007edae938aeb 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -74,7 +74,8 @@
+ #define IMA_UPDATE_XATTR      1
+ #define IMA_CHANGE_ATTR               2
+ #define IMA_DIGSIG            3
+-#define IMA_MUST_MEASURE      4
++#define IMA_MAY_EMIT_TOMTOU   4
++#define IMA_EMITTED_OPENWRITERS       5
+ 
+ enum evm_ima_xattr_type {
+       IMA_XATTR_DIGEST = 0x01,
