commit:     3a9a60ef2eebb127b9a97d68971ce8871421ccf4
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 20 05:26:40 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Sep 20 05:26:40 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3a9a60ef

Linux patch 6.1.153

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1152_linux-6.1.153.patch | 2513 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2517 insertions(+)

diff --git a/0000_README b/0000_README
index 9696c806..beeb2a18 100644
--- a/0000_README
+++ b/0000_README
@@ -651,6 +651,10 @@ Patch:  1151_linux-6.1.152.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.152
 
+Patch:  1152_linux-6.1.153.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.153
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1152_linux-6.1.153.patch b/1152_linux-6.1.153.patch
new file mode 100644
index 00000000..286e40a4
--- /dev/null
+++ b/1152_linux-6.1.153.patch
@@ -0,0 +1,2513 @@
+diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+index 6d176588df47d8..e66869e92b47da 100644
+--- a/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
++++ b/Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
+@@ -41,7 +41,7 @@ properties:
+           - const: dma_intr2
+ 
+   clocks:
+-    minItems: 1
++    maxItems: 1
+ 
+   clock-names:
+     const: sw_baud
+diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
+index ebc822e605f55b..9ac6d3973ad58e 100644
+--- a/Documentation/networking/can.rst
++++ b/Documentation/networking/can.rst
+@@ -740,7 +740,7 @@ The broadcast manager sends responses to user space in the same form:
+             struct timeval ival1, ival2;    /* count and subsequent interval */
+             canid_t can_id;                 /* unique can_id for task */
+             __u32 nframes;                  /* number of can_frames following */
+-            struct can_frame frames[0];
++            struct can_frame frames[];
+     };
+ 
+ The aligned payload 'frames' uses the same basic CAN frame structure defined
+diff --git a/Makefile b/Makefile
+index 5d7fd3b481b3d2..77ebc6dea10089 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 152
++SUBLEVEL = 153
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 1bb5e8f6c63e47..db508118891d0a 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -749,6 +749,18 @@ void kvm_set_cpu_caps(void)
+               0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
+               F(SME_COHERENT));
+ 
++      kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
++              BIT(0) /* NO_NESTED_DATA_BP */ |
++              BIT(2) /* LFENCE Always serializing */ | 0 /* SmmPgCfgLock */ |
++              BIT(5) /* The memory form of VERW mitigates TSA */ |
++              BIT(6) /* NULL_SEL_CLR_BASE */ | 0 /* PrefetchCtlMsr */
++      );
++      if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
++              kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(2) /* LFENCE Always serializing */;
++      if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
++              kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(6) /* NULL_SEL_CLR_BASE */;
++      kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(9) /* NO_SMM_CTL_MSR */;
++
+       kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
+               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+@@ -758,12 +770,15 @@ void kvm_set_cpu_caps(void)
+       if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
+               kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
+ 
+-      kvm_cpu_cap_mask(CPUID_8000_0021_EAX, F(VERW_CLEAR));
++      kvm_cpu_cap_check_and_set(X86_FEATURE_VERW_CLEAR);
+ 
+       kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX,
+               F(TSA_SQ_NO) | F(TSA_L1_NO)
+       );
+ 
++      kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO);
++      kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO);
++
+       /*
+        * Hide RDTSCP and RDPID if either feature is reported as supported but
+        * probing MSR_TSC_AUX failed.  This is purely a sanity check and
+@@ -1250,21 +1265,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+               break;
+       case 0x80000021:
+               entry->ebx = entry->edx = 0;
+-              /*
+-               * Pass down these bits:
+-               *    EAX      0      NNDBP, Processor ignores nested data breakpoints
+-               *    EAX      2      LAS, LFENCE always serializing
+-               *    EAX      6      NSCB, Null selector clear base
+-               *
+-               * Other defined bits are for MSRs that KVM does not expose:
+-               *   EAX      3      SPCL, SMM page configuration lock
+-               *   EAX      13     PCMSR, Prefetch control MSR
+-               */
+-              entry->eax &= BIT(0) | BIT(2) | BIT(6);
+-              if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+-                      entry->eax |= BIT(2);
+-              if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
+-                      entry->eax |= BIT(6);
++              cpuid_entry_override(entry, CPUID_8000_0021_EAX);
+               cpuid_entry_override(entry, CPUID_8000_0021_ECX);
+               break;
+       /*Add support for Centaur's CPUID instruction*/
+diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
+index f9912c3dd4d7c0..e7d87d952648bd 100644
+--- a/drivers/dma/dw/rzn1-dmamux.c
++++ b/drivers/dma/dw/rzn1-dmamux.c
+@@ -46,12 +46,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+       u32 mask;
+       int ret;
+ 
+-      if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
+-              return ERR_PTR(-EINVAL);
++      if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) {
++              ret = -EINVAL;
++              goto put_device;
++      }
+ 
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+-      if (!map)
+-              return ERR_PTR(-ENOMEM);
++      if (!map) {
++              ret = -ENOMEM;
++              goto put_device;
++      }
+ 
+       chan = dma_spec->args[0];
+       map->req_idx = dma_spec->args[4];
+@@ -92,12 +96,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+       if (ret)
+               goto clear_bitmap;
+ 
++      put_device(&pdev->dev);
+       return map;
+ 
+ clear_bitmap:
+       clear_bit(map->req_idx, dmamux->used_chans);
+ free_map:
+       kfree(map);
++put_device:
++      put_device(&pdev->dev);
+ 
+       return ERR_PTR(ret);
+ }
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index ea651d5cf332d9..127a6a302a5bb7 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -175,27 +175,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+       idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+       if (!idxd->wq_enable_map) {
+               rc = -ENOMEM;
+-              goto err_bitmap;
++              goto err_free_wqs;
+       }
+ 
+       for (i = 0; i < idxd->max_wqs; i++) {
+               wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
+               if (!wq) {
+                       rc = -ENOMEM;
+-                      goto err;
++                      goto err_unwind;
+               }
+ 
+               idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
+               conf_dev = wq_confdev(wq);
+               wq->id = i;
+               wq->idxd = idxd;
+-              device_initialize(wq_confdev(wq));
++              device_initialize(conf_dev);
+               conf_dev->parent = idxd_confdev(idxd);
+               conf_dev->bus = &dsa_bus_type;
+               conf_dev->type = &idxd_wq_device_type;
+               rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
+-              if (rc < 0)
+-                      goto err;
++              if (rc < 0) {
++                      put_device(conf_dev);
++                      kfree(wq);
++                      goto err_unwind;
++              }
+ 
+               mutex_init(&wq->wq_lock);
+               init_waitqueue_head(&wq->err_queue);
+@@ -206,15 +209,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+               wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
+               wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
+               if (!wq->wqcfg) {
++                      put_device(conf_dev);
++                      kfree(wq);
+                       rc = -ENOMEM;
+-                      goto err;
++                      goto err_unwind;
+               }
+ 
+               if (idxd->hw.wq_cap.op_config) {
+                       wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+                       if (!wq->opcap_bmap) {
++                              kfree(wq->wqcfg);
++                              put_device(conf_dev);
++                              kfree(wq);
+                               rc = -ENOMEM;
+-                              goto err_opcap_bmap;
++                              goto err_unwind;
+                       }
+                       bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+               }
+@@ -225,13 +233,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+ 
+       return 0;
+ 
+-err_opcap_bmap:
+-      kfree(wq->wqcfg);
+-
+-err:
+-      put_device(conf_dev);
+-      kfree(wq);
+-
++err_unwind:
+       while (--i >= 0) {
+               wq = idxd->wqs[i];
+               if (idxd->hw.wq_cap.op_config)
+@@ -240,11 +242,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
+               conf_dev = wq_confdev(wq);
+               put_device(conf_dev);
+               kfree(wq);
+-
+       }
+       bitmap_free(idxd->wq_enable_map);
+ 
+-err_bitmap:
++err_free_wqs:
+       kfree(idxd->wqs);
+ 
+       return rc;
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index 2ff787df513e60..8417883b24c041 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -1277,13 +1277,17 @@ static int bam_dma_probe(struct platform_device *pdev)
+       if (bdev->controlled_remotely || bdev->powered_remotely) {
+               ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+                                          &bdev->num_channels);
+-              if (ret)
++              if (ret) {
+                       dev_err(bdev->dev, "num-channels unspecified in dt\n");
++                      return ret;
++              }
+ 
+               ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+                                          &bdev->num_ees);
+-              if (ret)
++              if (ret) {
+                       dev_err(bdev->dev, "num-ees unspecified in dt\n");
++                      return ret;
++              }
+       }
+ 
+       if (bdev->controlled_remotely || bdev->powered_remotely)
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 89e06c87a258bc..f24a685da2a22e 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2072,8 +2072,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+        * priority. So Q0 is the highest priority queue and the last queue has
+        * the lowest priority.
+        */
+-      queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+-                                        GFP_KERNEL);
++      queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
++                                        sizeof(*queue_priority_map), GFP_KERNEL);
+       if (!queue_priority_map)
+               return -ENOMEM;
+ 
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index 0234407f36327a..ae890b4ece0988 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -127,7 +127,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
+ 
+       ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
+       if (!ptemp) {
+-              dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+               edac_printk(KERN_ERR, EDAC_MC,
+                           "Inject: Buffer Allocation error\n");
+               return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index cb73d06e1d38d1..e1ba0619878331 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -368,9 +368,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+       dma_fence_put(ring->vmid_wait);
+       ring->vmid_wait = NULL;
+       ring->me = 0;
+-
+-      if (!ring->is_mes_queue)
+-              ring->adev->rings[ring->idx] = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
+index 1a63da28f33002..8b8ba7426404a7 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_power.c
++++ b/drivers/gpu/drm/i915/display/intel_display_power.c
+@@ -1122,7 +1122,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
+       if (DISPLAY_VER(dev_priv) == 12)
+               abox_regs |= BIT(0);
+ 
+-      for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
++      for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
+               intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+ }
+ 
+@@ -1587,11 +1587,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+       if (table[config].page_mask == 0) {
+               drm_dbg(&dev_priv->drm,
+                       "Unknown memory configuration; disabling address buddy logic.\n");
+-              for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
++              for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
+                       intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+                                      BW_BUDDY_DISABLE);
+       } else {
+-              for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
++              for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
+                       intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+                                      table[config].page_mask);
+ 
+diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
+index f24b174c726671..1a799bffa5e1d8 100644
+--- a/drivers/input/misc/iqs7222.c
++++ b/drivers/input/misc/iqs7222.c
+@@ -2038,6 +2038,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
+               if (error)
+                       return error;
+ 
++              if (!iqs7222->kp_type[chan_index][i])
++                      continue;
++
+               if (!dev_desc->event_offset)
+                       continue;
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index 8813db7eec3978..630cdd5a132831 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -1155,6 +1155,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+               .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+                                       SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+       },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
++              },
++              .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++                                      SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++      },
++      {
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
++              },
++              .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++                                      SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++      },
+       /*
+        * A lot of modern Clevo barebones have touchpad and/or keyboard issues
+        * after suspend fixable with the forcenorestore quirk.
+diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
+index 710c9fb515fd09..ac245e384dc874 100644
+--- a/drivers/media/i2c/imx214.c
++++ b/drivers/media/i2c/imx214.c
+@@ -20,7 +20,9 @@
+ #include <media/v4l2-subdev.h>
+ 
+ #define IMX214_DEFAULT_CLK_FREQ       24000000
+-#define IMX214_DEFAULT_LINK_FREQ 480000000
++#define IMX214_DEFAULT_LINK_FREQ      600000000
++/* Keep wrong link frequency for backward compatibility */
++#define IMX214_DEFAULT_LINK_FREQ_LEGACY       480000000
+ #define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
+ #define IMX214_FPS 30
+ #define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+@@ -892,17 +894,26 @@ static int imx214_parse_fwnode(struct device *dev)
+               goto done;
+       }
+ 
+-      for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
++      if (bus_cfg.nr_of_link_frequencies != 1)
++              dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
++
++      for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
+               if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
+                       break;
+-
+-      if (i == bus_cfg.nr_of_link_frequencies) {
+-              dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
+-                      IMX214_DEFAULT_LINK_FREQ);
+-              ret = -EINVAL;
+-              goto done;
++              if (bus_cfg.link_frequencies[i] ==
++                  IMX214_DEFAULT_LINK_FREQ_LEGACY) {
++                      dev_warn(dev,
++                               "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
++                               IMX214_DEFAULT_LINK_FREQ);
++                      break;
++              }
+       }
+ 
++      if (i == bus_cfg.nr_of_link_frequencies)
++              ret = dev_err_probe(dev, -EINVAL,
++                                  "link-frequencies %d not supported, please review your DT\n",
++                                  IMX214_DEFAULT_LINK_FREQ);
++
+ done:
+       v4l2_fwnode_endpoint_free(&bus_cfg);
+       fwnode_handle_put(endpoint);
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
+index 27f08b1d34d199..e13f09d883543e 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
+@@ -65,8 +65,10 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+       }
+ 
+       fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+-      if (!fw)
++      if (!fw) {
++              scp_put(scp);
+               return ERR_PTR(-ENOMEM);
++      }
+       fw->type = SCP;
+       fw->ops = &mtk_vcodec_rproc_msg;
+       fw->scp = scp;
+diff --git a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
+index 13c4f860fa69cf..dc35ec7f9c049f 100644
+--- a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
++++ b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
+@@ -611,7 +611,11 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
+ 
+       inst->ctx = ctx;
+       inst->vpu_inst.ctx = ctx;
+-      inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
++      if (is_ext)
++              inst->vpu_inst.id = SCP_IPI_VENC_H264;
++      else
++              inst->vpu_inst.id = IPI_VENC_H264;
++
+       inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_SYS);
+ 
+       mtk_vcodec_debug_enter(inst);
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index 710d1d73eb352c..78f317ac04afaa 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
+               return ret;
+ 
+       /*
+-       * The write cycle timing is directly matching tWC, but is also
++       * Read setup timing depends on the operation done on the NAND:
++       *
++       * NRD_SETUP = max(tAR, tCLR)
++       */
++      timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
++      ncycles = DIV_ROUND_UP(timeps, mckperiodps);
++      totalcycles += ncycles;
++      ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
++      if (ret)
++              return ret;
++
++      /*
++       * The read cycle timing is directly matching tRC, but is also
+        * dependent on the setup and hold timings we calculated earlier,
+        * which gives:
+        *
+-       * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
+-       *
+-       * NRD_SETUP is always 0.
++       * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
+        */
+       ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
+       ncycles = max(totalcycles, ncycles);
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 9e74bcd90aaa2e..588c6312b31279 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -263,6 +263,7 @@ struct stm32_fmc2_nfc {
+       struct sg_table dma_data_sg;
+       struct sg_table dma_ecc_sg;
+       u8 *ecc_buf;
++      dma_addr_t dma_ecc_addr;
+       int dma_ecc_len;
+ 
+       struct completion complete;
+@@ -885,17 +886,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ 
+       if (!write_data && !raw) {
+               /* Configure DMA ECC status */
+-              p = nfc->ecc_buf;
+               for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+-                      sg_set_buf(sg, p, nfc->dma_ecc_len);
+-                      p += nfc->dma_ecc_len;
+-              }
+-
+-              ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-                               eccsteps, dma_data_dir);
+-              if (!ret) {
+-                      ret = -EIO;
+-                      goto err_unmap_data;
++                      sg_dma_address(sg) = nfc->dma_ecc_addr +
++                                           s * nfc->dma_ecc_len;
++                      sg_dma_len(sg) = nfc->dma_ecc_len;
+               }
+ 
+               desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+@@ -904,7 +898,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+                                                  DMA_PREP_INTERRUPT);
+               if (!desc_ecc) {
+                       ret = -ENOMEM;
+-                      goto err_unmap_ecc;
++                      goto err_unmap_data;
+               }
+ 
+               reinit_completion(&nfc->dma_ecc_complete);
+@@ -912,7 +906,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+               desc_ecc->callback_param = &nfc->dma_ecc_complete;
+               ret = dma_submit_error(dmaengine_submit(desc_ecc));
+               if (ret)
+-                      goto err_unmap_ecc;
++                      goto err_unmap_data;
+ 
+               dma_async_issue_pending(nfc->dma_ecc_ch);
+       }
+@@ -932,7 +926,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+               if (!write_data && !raw)
+                       dmaengine_terminate_all(nfc->dma_ecc_ch);
+               ret = -ETIMEDOUT;
+-              goto err_unmap_ecc;
++              goto err_unmap_data;
+       }
+ 
+       /* Wait DMA data transfer completion */
+@@ -952,11 +946,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+               }
+       }
+ 
+-err_unmap_ecc:
+-      if (!write_data && !raw)
+-              dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
+-                           eccsteps, dma_data_dir);
+-
+ err_unmap_data:
+       dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
+ 
+@@ -979,9 +968,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+ 
+       /* Write oob */
+       if (oob_required) {
+-              ret = nand_change_write_column_op(chip, mtd->writesize,
+-                                                chip->oob_poi, mtd->oobsize,
+-                                                false);
++              unsigned int offset_in_page = mtd->writesize;
++              const void *buf = chip->oob_poi;
++              unsigned int len = mtd->oobsize;
++
++              if (!raw) {
++                      struct mtd_oob_region oob_free;
++
++                      mtd_ooblayout_free(mtd, 0, &oob_free);
++                      offset_in_page += oob_free.offset;
++                      buf += oob_free.offset;
++                      len = oob_free.length;
++              }
++
++              ret = nand_change_write_column_op(chip, offset_in_page,
++                                                buf, len, false);
+               if (ret)
+                       return ret;
+       }
+@@ -1582,7 +1583,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
+               return ret;
+ 
+       /* Allocate a buffer to store ECC status registers */
+-      nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
++      nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
++                                         &nfc->dma_ecc_addr, GFP_KERNEL);
+       if (!nfc->ecc_buf)
+               return -ENOMEM;
+ 
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 43c812ea1de023..7d8dc36c9bbd86 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -622,14 +622,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+               dlc |= XCAN_DLCR_EDL_MASK;
+       }
+ 
+-      if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+-          (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+-              can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+-      else
+-              can_put_echo_skb(skb, ndev, 0, 0);
+-
+-      priv->tx_head++;
+-
+       priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
+       /* If the CAN frame is RTR frame this write triggers transmission
+        * (not on CAN FD)
+@@ -662,6 +654,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
+                                       data[1]);
+               }
+       }
++
++      if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
++          (priv->devtype.flags & XCAN_FLAG_TXFEMP))
++              can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
++      else
++              can_put_echo_skb(skb, ndev, 0, 0);
++
++      priv->tx_head++;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index d10db5d6d226a1..ca271d7a388b4e 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2137,7 +2137,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+                */
+               phy_dev = of_phy_find_device(fep->phy_node);
+               phy_reset_after_clk_enable(phy_dev);
+-              put_device(&phy_dev->mdio.dev);
++              if (phy_dev)
++                      put_device(&phy_dev->mdio.dev);
+       }
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 37d83b4bca7fda..e01eab03971fa0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4179,7 +4179,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+               irq_num = pf->msix_entries[base + vector].vector;
+               irq_set_affinity_notifier(irq_num, NULL);
+               irq_update_affinity_hint(irq_num, NULL);
+-              free_irq(irq_num, &vsi->q_vectors[vector]);
++              free_irq(irq_num, vsi->q_vectors[vector]);
+       }
+       return err;
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index ceff537d9d22dc..ba067c3860a510 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
+       } else {
+               dev_info(&adapter->pdev->dev, "online testing starting\n");
+ 
+-              /* PHY is powered down when interface is down */
+-              if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
++              if (igb_link_test(adapter, &data[TEST_LINK]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+-              else
+-                      data[TEST_LINK] = 0;
+ 
+               /* Online tests aren't run; pass by default */
+               data[TEST_REG] = 0;
+diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
+index eedfc7c2cc0527..80d28aecdce418 100644
+--- a/drivers/phy/tegra/xusb-tegra210.c
++++ b/drivers/phy/tegra/xusb-tegra210.c
+@@ -3165,18 +3165,22 @@ tegra210_xusb_padctl_probe(struct device *dev,
+       }
+ 
+       pdev = of_find_device_by_node(np);
++      of_node_put(np);
+       if (!pdev) {
+               dev_warn(dev, "PMC device is not available\n");
+               goto out;
+       }
+ 
+-      if (!platform_get_drvdata(pdev))
++      if (!platform_get_drvdata(pdev)) {
++              put_device(&pdev->dev);
+               return ERR_PTR(-EPROBE_DEFER);
++      }
+ 
+       padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
+       if (!padctl->regmap)
+               dev_info(dev, "failed to find PMC regmap\n");
+ 
++      put_device(&pdev->dev);
+ out:
+       return &padctl->base;
+ }
+diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
+index f502c36f3be543..df482485159fcc 100644
+--- a/drivers/phy/ti/phy-ti-pipe3.c
++++ b/drivers/phy/ti/phy-ti-pipe3.c
+@@ -666,12 +666,20 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
+       return 0;
+ }
+ 
++static void ti_pipe3_put_device(void *_dev)
++{
++      struct device *dev = _dev;
++
++      put_device(dev);
++}
++
+ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
+ {
+       struct device *dev = phy->dev;
+       struct device_node *node = dev->of_node;
+       struct device_node *control_node;
+       struct platform_device *control_pdev;
++      int ret;
+ 
+       phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
+                                                       "syscon-phy-power");
+@@ -703,6 +711,11 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
+               }
+ 
+               phy->control_dev = &control_pdev->dev;
++
++              ret = devm_add_action_or_reset(dev, ti_pipe3_put_device,
++                                             phy->control_dev);
++              if (ret)
++                      return ret;
+       }
+ 
+       if (phy->mode == PIPE3_MODE_PCIE) {
+diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
+index 29fc27c2cda0bf..dd3b0137d902c4 100644
+--- a/drivers/regulator/sy7636a-regulator.c
++++ b/drivers/regulator/sy7636a-regulator.c
+@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+       if (!regmap)
+               return -EPROBE_DEFER;
+ 
+-      gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
++      device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
++
++      gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
+       if (IS_ERR(gdp)) {
+-              dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
++              dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+               return PTR_ERR(gdp);
+       }
+ 
+@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
+       }
+ 
+       config.dev = &pdev->dev;
+-      config.dev->of_node = pdev->dev.parent->of_node;
+       config.regmap = regmap;
+ 
+       rdev = devm_regulator_register(&pdev->dev, &desc, &config);
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index fc6fdcd0a5d4e5..2448133d9987fc 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -32,18 +32,20 @@ static bool mdt_header_valid(const struct firmware *fw)
+               return false;
+ 
+       if (ehdr->e_phentsize != sizeof(struct elf32_phdr))
+-              return -EINVAL;
++              return false;
+ 
+       phend = size_add(size_mul(sizeof(struct elf32_phdr), ehdr->e_phnum), ehdr->e_phoff);
+       if (phend > fw->size)
+               return false;
+ 
+-      if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
+-              return -EINVAL;
++      if (ehdr->e_shentsize || ehdr->e_shnum) {
++              if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
++                      return false;
+ 
+-      shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
+-      if (shend > fw->size)
+-              return false;
++              shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
++              if (shend > fw->size)
++                      return false;
++      }
+ 
+       return true;
+ }
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index 4802cfaa107fce..3a8aa6490f2fc1 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -543,10 +543,10 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
+       }
+ 
+       /*
+-       * Racy, but harmless, kick thread if there is still pending data.
++       * Kick thread to flush if there's still pending data
++       * or to wakeup the write queue.
+        */
+-      if (hp->n_outbuf)
+-              hvc_kick();
++      hvc_kick();
+ 
+       return written;
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index c07baf5d5a9ce9..63573ef8b91423 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1166,17 +1166,6 @@ static int sc16is7xx_startup(struct uart_port *port)
+       sc16is7xx_port_write(port, SC16IS7XX_FCR_REG,
+                            SC16IS7XX_FCR_FIFO_BIT);
+ 
+-      /* Enable EFR */
+-      sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
+-                           SC16IS7XX_LCR_CONF_MODE_B);
+-
+-      regcache_cache_bypass(one->regmap, true);
+-
+-      /* Enable write access to enhanced features and internal clock div */
+-      sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+-                            SC16IS7XX_EFR_ENABLE_BIT,
+-                            SC16IS7XX_EFR_ENABLE_BIT);
+-
+       /* Enable TCR/TLR */
+       sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+                             SC16IS7XX_MCR_TCRTLR_BIT,
+@@ -1188,7 +1177,8 @@ static int sc16is7xx_startup(struct uart_port *port)
+                            SC16IS7XX_TCR_RX_RESUME(24) |
+                            SC16IS7XX_TCR_RX_HALT(48));
+ 
+-      regcache_cache_bypass(one->regmap, false);
++      /* Disable TCR/TLR access */
++      sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_TCRTLR_BIT, 0);
+ 
+       /* Now, initialize the UART */
+       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 6e18e8e76e8b9b..8d1da4616130a1 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -764,8 +764,7 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+       if (!dum->driver)
+               return -ESHUTDOWN;
+ 
+-      local_irq_save(flags);
+-      spin_lock(&dum->lock);
++      spin_lock_irqsave(&dum->lock, flags);
+       list_for_each_entry(iter, &ep->queue, queue) {
+               if (&iter->req != _req)
+                       continue;
+@@ -775,15 +774,16 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+               retval = 0;
+               break;
+       }
+-      spin_unlock(&dum->lock);
+ 
+       if (retval == 0) {
+               dev_dbg(udc_dev(dum),
+                               "dequeued req %p from %s, len %d buf %p\n",
+                               req, _ep->name, _req->length, _req->buf);
++              spin_unlock(&dum->lock);
+               usb_gadget_giveback_request(_ep, _req);
++              spin_lock(&dum->lock);
+       }
+-      local_irq_restore(flags);
++      spin_unlock_irqrestore(&dum->lock, flags);
+       return retval;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2a3bf8718efcad..7e58be8e156676 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1322,7 +1322,18 @@ static const struct usb_device_id option_ids[] = {
+        .driver_info = NCTRL(0) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff),    /* Telit LE910C1-EUX (ECM) */
+        .driver_info = NCTRL(0) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1034, 0xff),    /* Telit LE910C4-WWX (rmnet) */
++       .driver_info = RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1036, 0xff) },  /* Telit LE910C4-WWX */
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1037, 0xff),    /* Telit LE910C4-WWX (rmnet) */
++       .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1038, 0xff),    /* Telit LE910C4-WWX (rmnet) */
++       .driver_info = NCTRL(0) | RSVD(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103b, 0xff),    /* Telit LE910C4-WWX */
++       .driver_info = NCTRL(0) | NCTRL(1) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103c, 0xff),    /* Telit LE910C4-WWX */
++       .driver_info = NCTRL(0) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+         .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+@@ -1369,6 +1380,12 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),    /* Telit FN990A (PCIe) */
+         .driver_info = RSVD(0) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1077, 0xff),    /* Telit FN990A (rmnet + audio) */
++        .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1078, 0xff),    /* Telit FN990A (MBIM + audio) */
++        .driver_info = NCTRL(0) | RSVD(1) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1079, 0xff),    /* Telit FN990A (RNDIS + audio) */
++        .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),    /* Telit FE990A (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),    /* Telit FE990A (MBIM) */
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 723dd9b94e5673..db5ba56a837a82 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3103,7 +3103,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+               .nodeid_out = ff_out->nodeid,
+               .fh_out = ff_out->fh,
+               .off_out = pos_out,
+-              .len = len,
++              .len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
+               .flags = flags
+       };
+       struct fuse_write_out outarg;
+@@ -3169,6 +3169,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
+               fc->no_copy_file_range = 1;
+               err = -EOPNOTSUPP;
+       }
++      if (!err && outarg.size > len)
++              err = -EIO;
++
+       if (err)
+               goto out;
+ 
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index cf57b7cc3a4309..5087d18cd1f829 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -70,6 +70,24 @@ static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
+                                        !list_empty(&of->list));
+ }
+ 
++/* Get active reference to kernfs node for an open file */
++static struct kernfs_open_file *kernfs_get_active_of(struct kernfs_open_file *of)
++{
++      /* Skip if file was already released */
++      if (unlikely(of->released))
++              return NULL;
++
++      if (!kernfs_get_active(of->kn))
++              return NULL;
++
++      return of;
++}
++
++static void kernfs_put_active_of(struct kernfs_open_file *of)
++{
++      return kernfs_put_active(of->kn);
++}
++
+ /**
+  * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
+  *
+@@ -139,7 +157,7 @@ static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
+ 
+       if (ops->seq_stop)
+               ops->seq_stop(sf, v);
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+ }
+ 
+ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+@@ -152,7 +170,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
+        * the ops aren't called concurrently for the same open file.
+        */
+       mutex_lock(&of->mutex);
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return ERR_PTR(-ENODEV);
+ 
+       ops = kernfs_ops(of->kn);
+@@ -238,7 +256,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+        * the ops aren't called concurrently for the same open file.
+        */
+       mutex_lock(&of->mutex);
+-      if (!kernfs_get_active(of->kn)) {
++      if (!kernfs_get_active_of(of)) {
+               len = -ENODEV;
+               mutex_unlock(&of->mutex);
+               goto out_free;
+@@ -252,7 +270,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+       else
+               len = -EINVAL;
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       mutex_unlock(&of->mutex);
+ 
+       if (len < 0)
+@@ -323,7 +341,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+        * the ops aren't called concurrently for the same open file.
+        */
+       mutex_lock(&of->mutex);
+-      if (!kernfs_get_active(of->kn)) {
++      if (!kernfs_get_active_of(of)) {
+               mutex_unlock(&of->mutex);
+               len = -ENODEV;
+               goto out_free;
+@@ -335,7 +353,7 @@ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+       else
+               len = -EINVAL;
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       mutex_unlock(&of->mutex);
+ 
+       if (len > 0)
+@@ -357,13 +375,13 @@ static void kernfs_vma_open(struct vm_area_struct *vma)
+       if (!of->vm_ops)
+               return;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return;
+ 
+       if (of->vm_ops->open)
+               of->vm_ops->open(vma);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+ }
+ 
+ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
+@@ -375,14 +393,14 @@ static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
+       if (!of->vm_ops)
+               return VM_FAULT_SIGBUS;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return VM_FAULT_SIGBUS;
+ 
+       ret = VM_FAULT_SIGBUS;
+       if (of->vm_ops->fault)
+               ret = of->vm_ops->fault(vmf);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+@@ -395,7 +413,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
+       if (!of->vm_ops)
+               return VM_FAULT_SIGBUS;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return VM_FAULT_SIGBUS;
+ 
+       ret = 0;
+@@ -404,7 +422,7 @@ static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
+       else
+               file_update_time(file);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+@@ -418,14 +436,14 @@ static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
+       if (!of->vm_ops)
+               return -EINVAL;
+ 
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               return -EINVAL;
+ 
+       ret = -EINVAL;
+       if (of->vm_ops->access)
+               ret = of->vm_ops->access(vma, addr, buf, len, write);
+ 
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+@@ -504,7 +522,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+       mutex_lock(&of->mutex);
+ 
+       rc = -ENODEV;
+-      if (!kernfs_get_active(of->kn))
++      if (!kernfs_get_active_of(of))
+               goto out_unlock;
+ 
+       ops = kernfs_ops(of->kn);
+@@ -539,7 +557,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
+       }
+       vma->vm_ops = &kernfs_vm_ops;
+ out_put:
+-      kernfs_put_active(of->kn);
++      kernfs_put_active_of(of);
+ out_unlock:
+       mutex_unlock(&of->mutex);
+ 
+@@ -894,7 +912,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
+       struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
+       __poll_t ret;
+ 
+-      if (!kernfs_get_active(kn))
++      if (!kernfs_get_active_of(of))
+               return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+ 
+       if (kn->attr.ops->poll)
+@@ -902,7 +920,7 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
+       else
+               ret = kernfs_generic_poll(of, wait);
+ 
+-      kernfs_put_active(kn);
++      kernfs_put_active_of(of);
+       return ret;
+ }
+ 
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 36025097d21b80..2ca04dcb192aa7 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -859,6 +859,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
+ 
+       if (fsinfo->xattr_support)
+               server->caps |= NFS_CAP_XATTR;
++      else
++              server->caps &= ~NFS_CAP_XATTR;
+ #endif
+ }
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 5dd16f4ae74d19..e84ac71bdc18f8 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -276,7 +276,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
+               struct pnfs_layout_segment *l2)
+ {
+       const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
+-      const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
++      const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
+       u32 i;
+ 
+       if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
+@@ -756,8 +756,11 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+                       continue;
+ 
+               if (check_device &&
+-                  nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
++                  nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
++                      // reinitialize the error state in case if this is the last iteration
++                      ds = ERR_PTR(-EINVAL);
+                       continue;
++              }
+ 
+               *best_idx = idx;
+               break;
+@@ -787,7 +790,7 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
+       struct nfs4_pnfs_ds *ds;
+ 
+       ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
+-      if (ds)
++      if (!IS_ERR(ds))
+               return ds;
+       return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
+ }
+@@ -801,7 +804,7 @@ ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
+ 
+       ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
+                                              best_idx);
+-      if (ds || !pgio->pg_mirror_idx)
++      if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
+               return ds;
+       return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+ }
+@@ -859,7 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+       req->wb_nio = 0;
+ 
+       ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
+-      if (!ds) {
++      if (IS_ERR(ds)) {
+               if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+                       goto out_mds;
+               pnfs_generic_pg_cleanup(pgio);
+@@ -1063,11 +1066,13 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
+ {
+       u32 idx = hdr->pgio_mirror_idx + 1;
+       u32 new_idx = 0;
++      struct nfs4_pnfs_ds *ds;
+ 
+-      if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
+-              ff_layout_send_layouterror(hdr->lseg);
+-      else
++      ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
++      if (IS_ERR(ds))
+               pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
++      else
++              ff_layout_send_layouterror(hdr->lseg);
+       pnfs_read_resend_pnfs(hdr, new_idx);
+ }
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 71e96fddc6cb5c..f8a91d15982dcb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3888,8 +3888,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
+                       res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
+               }
+               memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+-              server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
+-                                NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
++              server->caps &=
++                      ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
++                        NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS);
+               server->fattr_valid = NFS_ATTR_FATTR_V4;
+               if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
+                               res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
+@@ -3957,7 +3958,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+       };
+       int err;
+ 
+-      nfs_server_set_init_caps(server);
+       do {
+               err = nfs4_handle_exception(server,
+                               _nfs4_server_capabilities(server, fhandle),
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index 70a768b623cf40..bb3a56b7f9a7c8 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -696,6 +696,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
+  * it not only handles the fiemap for inlined files, but also deals
+  * with the fast symlink, cause they have no difference for extent
+  * mapping per se.
++ *
++ * Must be called with ip_alloc_sem semaphore held.
+  */
+ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+                              struct fiemap_extent_info *fieinfo,
+@@ -707,6 +709,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+       u64 phys;
+       u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
+       struct ocfs2_inode_info *oi = OCFS2_I(inode);
++      lockdep_assert_held_read(&oi->ip_alloc_sem);
+ 
+       di = (struct ocfs2_dinode *)di_bh->b_data;
+       if (ocfs2_inode_is_fast_symlink(inode))
+@@ -722,8 +725,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
+                       phys += offsetof(struct ocfs2_dinode,
+                                        id2.i_data.id_data);
+ 
++              /* Release the ip_alloc_sem to prevent deadlock on page fault */
++              up_read(&OCFS2_I(inode)->ip_alloc_sem);
+               ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
+                                             flags);
++              down_read(&OCFS2_I(inode)->ip_alloc_sem);
+               if (ret < 0)
+                       return ret;
+       }
+@@ -792,9 +798,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+               len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
+               phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
+               virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
+-
++              /* Release the ip_alloc_sem to prevent deadlock on page fault */
++              up_read(&OCFS2_I(inode)->ip_alloc_sem);
+               ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
+                                             len_bytes, fe_flags);
++              down_read(&OCFS2_I(inode)->ip_alloc_sem);
+               if (ret)
+                       break;
+ 
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index c96c884208a983..21820c729b4bd0 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -389,7 +389,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
+       if (proc_alloc_inum(&dp->low_ino))
+               goto out_free_entry;
+ 
+-      pde_set_flags(dp);
++      if (!S_ISDIR(dp->mode))
++              pde_set_flags(dp);
+ 
+       write_lock(&proc_subdir_lock);
+       dp->parent = dir;
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 9b673fefcef8a6..f9de53fff3acc4 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -23,23 +23,42 @@
+ #define KASAN_ABI_VERSION 5
+ 
+ /*
++ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
++ * dropping __has_feature support for sanitizers:
++ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
++ * Create these macros for older versions of clang so that it is easy to clean
++ * up once the minimum supported version of LLVM for building the kernel always
++ * creates these macros.
++ *
+  * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+  * enabled. Therefore it is not required to additionally check 
defined(CONFIG_*)
+  * to avoid adding redundant attributes in other configurations.
+  */
++#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
++#define __SANITIZE_ADDRESS__
++#endif
++#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
++#define __SANITIZE_HWADDRESS__
++#endif
++#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
++#define __SANITIZE_THREAD__
++#endif
+ 
+-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
++/*
++ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
++ */
++#ifdef __SANITIZE_HWADDRESS__
+ #define __SANITIZE_ADDRESS__
++#endif
++
++#ifdef __SANITIZE_ADDRESS__
+ #define __no_sanitize_address \
+               __attribute__((no_sanitize("address", "hwaddress")))
+ #else
+ #define __no_sanitize_address
+ #endif
+ 
+-#if __has_feature(thread_sanitizer)
+-/* emulate gcc's __SANITIZE_THREAD__ flag */
+-#define __SANITIZE_THREAD__
++#ifdef __SANITIZE_THREAD__
+ #define __no_sanitize_thread \
+               __attribute__((no_sanitize("thread")))
+ #else
+diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h
+new file mode 100644
+index 00000000000000..9174fa59bbc54d
+--- /dev/null
++++ b/include/linux/pgalloc.h
+@@ -0,0 +1,29 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_PGALLOC_H
++#define _LINUX_PGALLOC_H
++
++#include <linux/pgtable.h>
++#include <asm/pgalloc.h>
++
++/*
++ * {pgd,p4d}_populate_kernel() are defined as macros to allow
++ * compile-time optimization based on the configured page table levels.
++ * Without this, linking may fail because callers (e.g., KASAN) may rely
++ * on calls to these functions being optimized away when passing symbols
++ * that exist only for certain page table levels.
++ */
++#define pgd_populate_kernel(addr, pgd, p4d)                           \
++      do {                                                            \
++              pgd_populate(&init_mm, pgd, p4d);                       \
++              if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED)     \
++                      arch_sync_kernel_mappings(addr, addr);          \
++      } while (0)
++
++#define p4d_populate_kernel(addr, p4d, pud)                           \
++      do {                                                            \
++              p4d_populate(&init_mm, p4d, pud);                       \
++              if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED)     \
++                      arch_sync_kernel_mappings(addr, addr);          \
++      } while (0)
++
++#endif /* _LINUX_PGALLOC_H */
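A hedged sketch of a caller of the new header: the populate step and the conditional arch_sync_kernel_mappings() call travel together, so an early-init path no longer has to remember the sync itself. Assumes an early boot context where memblock_alloc() is valid:

    #include <linux/pgalloc.h>
    #include <linux/memblock.h>

    static void __init populate_one_p4d(unsigned long addr, p4d_t *p4d)
    {
            pud_t *new;

            if (!p4d_none(*p4d))
                    return;
            new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
            if (new)
                    p4d_populate_kernel(addr, p4d, new);  /* populate + sync */
    }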
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index b8dd98edca9996..82d78cba79d69b 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1474,8 +1474,8 @@ static inline int pmd_protnone(pmd_t pmd)
+ 
+ /*
+  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
+- * needs to be called.
++ * and let generic vmalloc, ioremap and page table update code know when
++ * arch_sync_kernel_mappings() needs to be called.
+  */
+ #ifndef ARCH_PAGE_TABLE_SYNC_MASK
+ #define ARCH_PAGE_TABLE_SYNC_MASK 0
+@@ -1608,10 +1608,11 @@ static inline bool arch_has_pfn_modify_check(void)
+ /*
+  * Page Table Modification bits for pgtbl_mod_mask.
+  *
+- * These are used by the p?d_alloc_track*() set of functions an in the generic
+- * vmalloc/ioremap code to track at which page-table levels entries have been
+- * modified. Based on that the code can better decide when vmalloc and ioremap
+- * mapping changes need to be synchronized to other page-tables in the system.
++ * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
++ * functions in the generic vmalloc, ioremap and page table update code
++ * to track at which page-table levels entries have been modified.
++ * Based on that the code can better decide when page table changes need
++ * to be synchronized to other page-tables in the system.
+  */
+ #define               __PGTBL_PGD_MODIFIED    0
+ #define               __PGTBL_P4D_MODIFIED    1
+diff --git a/include/net/sock.h b/include/net/sock.h
+index e00cd9d0bec8e5..2da169ea7cc186 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -350,6 +350,8 @@ struct sk_filter;
+   *   @sk_txtime_unused: unused txtime flags
+   *   @ns_tracker: tracker for netns reference
+   *   @sk_bind2_node: bind node in the bhash2 table
++  *   @sk_owner: reference to the real owner of the socket that calls
++  *              sock_lock_init_class_and_name().
+   */
+ struct sock {
+       /*
+@@ -541,6 +543,10 @@ struct sock {
+       struct rcu_head         sk_rcu;
+       netns_tracker           ns_tracker;
+       struct hlist_node       sk_bind2_node;
++
++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
++      struct module           *sk_owner;
++#endif
+ };
+ 
+ enum sk_pacing {
+@@ -1724,6 +1730,35 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+       sk_mem_reclaim(sk);
+ }
+ 
++#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
++static inline void sk_owner_set(struct sock *sk, struct module *owner)
++{
++      __module_get(owner);
++      sk->sk_owner = owner;
++}
++
++static inline void sk_owner_clear(struct sock *sk)
++{
++      sk->sk_owner = NULL;
++}
++
++static inline void sk_owner_put(struct sock *sk)
++{
++      module_put(sk->sk_owner);
++}
++#else
++static inline void sk_owner_set(struct sock *sk, struct module *owner)
++{
++}
++
++static inline void sk_owner_clear(struct sock *sk)
++{
++}
++
++static inline void sk_owner_put(struct sock *sk)
++{
++}
++#endif
+ /*
+  * Macro so as to not evaluate some arguments when
+  * lockdep is not enabled.
+@@ -1733,13 +1768,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
+  */
+ #define sock_lock_init_class_and_name(sk, sname, skey, name, key)     \
+ do {                                                                  \
++      sk_owner_set(sk, THIS_MODULE);                                  \
+       sk->sk_lock.owned = 0;                                          \
+       init_waitqueue_head(&sk->sk_lock.wq);                           \
+       spin_lock_init(&(sk)->sk_lock.slock);                           \
+       debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
+-                      sizeof((sk)->sk_lock));                         \
++                                 sizeof((sk)->sk_lock));              \
+       lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
+-                              (skey), (sname));                               \
++                                 (skey), (sname));                    \
+       lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
+ } while (0)
+ 
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index b3860ec12450e6..8aa7ede57e7182 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -669,17 +669,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ /*
+  * Is the high resolution mode active ?
+  */
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
++static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+ {
+       return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+               cpu_base->hres_active : 0;
+ }
+ 
+-static inline int hrtimer_hres_active(void)
+-{
+-      return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+-}
+-
+ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+                               struct hrtimer *next_timer,
+                               ktime_t expires_next)
+@@ -703,7 +698,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
+        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+-      if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
++      if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+               return;
+ 
+       tick_program_event(expires_next, 1);
+@@ -811,13 +806,13 @@ static void retrigger_next_event(void *arg)
+        * of the next expiring timer is enough. The return from the SMP
+        * function call will take care of the reprogramming in case the
+        * CPU was in a NOHZ idle sleep.
++       *
++       * In periodic low resolution mode, the next softirq expiration
++       * must also be updated.
+        */
+-      if (!__hrtimer_hres_active(base) && !tick_nohz_active)
+-              return;
+-
+       raw_spin_lock(&base->lock);
+       hrtimer_update_base(base);
+-      if (__hrtimer_hres_active(base))
++      if (hrtimer_hres_active(base))
+               hrtimer_force_reprogram(base, 0);
+       else
+               hrtimer_update_next_event(base);
+@@ -974,7 +969,7 @@ void clock_was_set(unsigned int bases)
+       cpumask_var_t mask;
+       int cpu;
+ 
+-      if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
++      if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+               goto out_timerfd;
+ 
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -1551,7 +1546,7 @@ u64 hrtimer_get_next_event(void)
+ 
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ 
+-      if (!__hrtimer_hres_active(cpu_base))
++      if (!hrtimer_hres_active(cpu_base))
+               expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+ 
+       raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+@@ -1574,7 +1569,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+ 
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ 
+-      if (__hrtimer_hres_active(cpu_base)) {
++      if (hrtimer_hres_active(cpu_base)) {
+               unsigned int active;
+ 
+               if (!cpu_base->softirq_activated) {
+@@ -1935,25 +1930,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+       tick_program_event(expires_next, 1);
+       pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
+ }
+-
+-/* called with interrupts disabled */
+-static inline void __hrtimer_peek_ahead_timers(void)
+-{
+-      struct tick_device *td;
+-
+-      if (!hrtimer_hres_active())
+-              return;
+-
+-      td = this_cpu_ptr(&tick_cpu_device);
+-      if (td && td->evtdev)
+-              hrtimer_interrupt(td->evtdev);
+-}
+-
+-#else /* CONFIG_HIGH_RES_TIMERS */
+-
+-static inline void __hrtimer_peek_ahead_timers(void) { }
+-
+-#endif        /* !CONFIG_HIGH_RES_TIMERS */
++#endif /* !CONFIG_HIGH_RES_TIMERS */
+ 
+ /*
+  * Called from run_local_timers in hardirq context every jiffy
+@@ -1964,7 +1941,7 @@ void hrtimer_run_queues(void)
+       unsigned long flags;
+       ktime_t now;
+ 
+-      if (__hrtimer_hres_active(cpu_base))
++      if (hrtimer_hres_active(cpu_base))
+               return;
+ 
+       /*
+@@ -2309,11 +2286,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
+                                    &new_base->clock_base[i]);
+       }
+ 
+-      /*
+-       * The migration might have changed the first expiring softirq
+-       * timer on this CPU. Update it.
+-       */
+-      __hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
+       /* Tell the other CPU to retrigger the next event */
+       smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7e8ab09d98cc73..9795fc7daeb6bb 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -710,7 +710,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+               /* copy the current bits to the new max */
+               ret = trace_pid_list_first(filtered_pids, &pid);
+               while (!ret) {
+-                      trace_pid_list_set(pid_list, pid);
++                      ret = trace_pid_list_set(pid_list, pid);
++                      if (ret < 0)
++                              goto out;
++
+                       ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
+                       nr_pids++;
+               }
+@@ -747,6 +750,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
+               trace_parser_clear(&parser);
+               ret = 0;
+       }
++ out:
+       trace_parser_put(&parser);
+ 
+       if (ret < 0) {
+@@ -7253,7 +7257,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+       entry = ring_buffer_event_data(event);
+       entry->ip = _THIS_IP_;
+ 
+-      len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
++      len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
+       if (len) {
+               memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
+               cnt = FAULTED_SIZE;
+@@ -7328,7 +7332,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+ 
+       entry = ring_buffer_event_data(event);
+ 
+-      len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
++      len = copy_from_user_nofault(&entry->id, ubuf, cnt);
+       if (len) {
+               entry->id = -1;
+               memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
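The switch matters because the bare __copy_from_user_inatomic() neither validates the user pointer nor disables page faults on its own, while copy_from_user_nofault() does both and returns 0 on success or -EFAULT on any fault. A hedged one-liner showing the contract (wrapper hypothetical):

    #include <linux/uaccess.h>

    /* True iff all @cnt bytes arrived without faulting. */
    static bool marker_copy(void *dst, const char __user *ubuf, size_t cnt)
    {
            return copy_from_user_nofault(dst, ubuf, cnt) == 0;
    }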
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 385e9fbbfbe7c7..c2817d0c05b695 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -383,13 +383,11 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
+                               str_field = (char *)entry + data_offset;
+ 
+                               trace_seq_printf(s, print_fmt, se->fields[i]->name,
+-                                               STR_VAR_LEN_MAX,
+                                                str_field,
+                                                i == se->n_fields - 1 ? "" : " ");
+                               n_u64++;
+                       } else {
+                               trace_seq_printf(s, print_fmt, se->fields[i]->name,
+-                                               STR_VAR_LEN_MAX,
+                                                (char *)&entry->fields[n_u64],
+                                                i == se->n_fields - 1 ? "" : " ");
+                               n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 98a678129b0671..61311800abc984 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -203,6 +203,9 @@ static int damon_lru_sort_apply_parameters(void)
+       unsigned int hot_thres, cold_thres;
+       int err = 0;
+ 
++      if (!damon_lru_sort_mon_attrs.sample_interval)
++              return -EINVAL;
++
+       err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
+       if (err)
+               return err;
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index cc337e94acfda8..7952a0b7f409d2 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -157,6 +157,9 @@ static int damon_reclaim_apply_parameters(void)
+       struct damos *scheme, *old_scheme;
+       int err = 0;
+ 
++      if (!damon_reclaim_mon_attrs.aggr_interval)
++              return -EINVAL;
++
+       err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
+       if (err)
+               return err;
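Both DAMON guards exist for the same reason: the parameter-apply paths derive thresholds by dividing one interval by another, so a zero written via module parameters would divide by zero. A hedged illustration of the arithmetic (struct damon_attrs fields are real; the percentage input is assumed):

    static unsigned int hot_threshold(struct damon_attrs *attrs,
                                      unsigned long hot_perc)
    {
            /* accesses per aggregation = aggr / sample; sample must be != 0 */
            unsigned int max_nr_accesses =
                    attrs->aggr_interval / attrs->sample_interval;

            return max_nr_accesses * hot_perc / 100;
    }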
+diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
+index 9ea21b6d266beb..18f459a3c9fff2 100644
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -2093,14 +2093,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ {
+       struct damon_sysfs_kdamond *kdamond = container_of(kobj,
+                       struct damon_sysfs_kdamond, kobj);
+-      struct damon_ctx *ctx = kdamond->damon_ctx;
+-      bool running;
++      struct damon_ctx *ctx;
++      bool running = false;
+ 
+-      if (!ctx)
+-              running = false;
+-      else
++      if (!mutex_trylock(&damon_sysfs_lock))
++              return -EBUSY;
++
++      ctx = kdamond->damon_ctx;
++      if (ctx)
+               running = damon_sysfs_ctx_running(ctx);
+ 
++      mutex_unlock(&damon_sysfs_lock);
++
+       return sysfs_emit(buf, "%s\n", running ?
+                       damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
+                       damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
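state_show() now refuses to sleep on damon_sysfs_lock, reporting -EBUSY instead, so a sysfs reader can no longer deadlock against a writer that holds the lock while tearing the kdamond down. The same pattern in generic, hedged form (lock and predicate hypothetical):

    static DEFINE_MUTEX(my_lock);
    static bool my_ctx_running(void);   /* hypothetical predicate */

    static ssize_t my_state_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
    {
            bool running = false;

            if (!mutex_trylock(&my_lock))
                    return -EBUSY;      /* let userspace retry; never block */
            running = my_ctx_running();
            mutex_unlock(&my_lock);

            return sysfs_emit(buf, "%s\n", running ? "on" : "off");
    }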
+diff --git a/mm/kasan/init.c b/mm/kasan/init.c
+index cc64ed6858c662..2c17bc77382fb3 100644
+--- a/mm/kasan/init.c
++++ b/mm/kasan/init.c
+@@ -13,9 +13,9 @@
+ #include <linux/mm.h>
+ #include <linux/pfn.h>
+ #include <linux/slab.h>
++#include <linux/pgalloc.h>
+ 
+ #include <asm/page.h>
+-#include <asm/pgalloc.h>
+ 
+ #include "kasan.h"
+ 
+@@ -188,7 +188,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+                       pud_t *pud;
+                       pmd_t *pmd;
+ 
+-                      p4d_populate(&init_mm, p4d,
++                      p4d_populate_kernel(addr, p4d,
+                                       lm_alias(kasan_early_shadow_pud));
+                       pud = pud_offset(p4d, addr);
+                       pud_populate(&init_mm, pud,
+@@ -207,7 +207,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+                               if (!p)
+                                       return -ENOMEM;
+                       } else {
+-                              p4d_populate(&init_mm, p4d,
++                              p4d_populate_kernel(addr, p4d,
+                                       early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                       }
+               }
+@@ -247,10 +247,10 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
+                        * puds,pmds, so pgd_populate(), pud_populate()
+                        * is noops.
+                        */
+-                      pgd_populate(&init_mm, pgd,
++                      pgd_populate_kernel(addr, pgd,
+                                       lm_alias(kasan_early_shadow_p4d));
+                       p4d = p4d_offset(pgd, addr);
+-                      p4d_populate(&init_mm, p4d,
++                      p4d_populate_kernel(addr, p4d,
+                                       lm_alias(kasan_early_shadow_pud));
+                       pud = pud_offset(p4d, addr);
+                       pud_populate(&init_mm, pud,
+@@ -269,7 +269,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
+                               if (!p)
+                                       return -ENOMEM;
+                       } else {
+-                              pgd_populate(&init_mm, pgd,
++                              pgd_populate_kernel(addr, pgd,
+                                       early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+                       }
+               }
+diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
+index df9658299a08a0..4bb5159b4a41e9 100644
+--- a/mm/kasan/kasan_test.c
++++ b/mm/kasan/kasan_test.c
+@@ -993,6 +993,7 @@ static void kasan_strings(struct kunit *test)
+ 
+       ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
++      OPTIMIZER_HIDE_VAR(ptr);
+ 
+       kfree(ptr);
+ 
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index eb46acfd3d2057..ded9a00b20b580 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1140,6 +1140,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+       int result = SCAN_FAIL, referenced = 0;
+       int none_or_zero = 0, shared = 0;
+       struct page *page = NULL;
++      struct folio *folio = NULL;
+       unsigned long _address;
+       spinlock_t *ptl;
+       int node = NUMA_NO_NODE, unmapped = 0;
+@@ -1221,29 +1222,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+                       }
+               }
+ 
+-              page = compound_head(page);
+-
++              folio = page_folio(page);
+               /*
+                * Record which node the original page is from and save this
+                * information to cc->node_load[].
+                * Khugepaged will allocate hugepage from the node has the max
+                * hit record.
+                */
+-              node = page_to_nid(page);
++              node = folio_nid(folio);
+               if (hpage_collapse_scan_abort(node, cc)) {
+                       result = SCAN_SCAN_ABORT;
+                       goto out_unmap;
+               }
+               cc->node_load[node]++;
+-              if (!PageLRU(page)) {
++              if (!folio_test_lru(folio)) {
+                       result = SCAN_PAGE_LRU;
+                       goto out_unmap;
+               }
+-              if (PageLocked(page)) {
++              if (folio_test_locked(folio)) {
+                       result = SCAN_PAGE_LOCK;
+                       goto out_unmap;
+               }
+-              if (!PageAnon(page)) {
++              if (!folio_test_anon(folio)) {
+                       result = SCAN_PAGE_ANON;
+                       goto out_unmap;
+               }
+@@ -1265,7 +1265,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+                * has excessive GUP pins (i.e. 512).  Anyway the same check
+                * will be done again later the risk seems low.
+                */
+-              if (!is_refcount_suitable(page)) {
++              if (!is_refcount_suitable(&folio->page)) {
+                       result = SCAN_PAGE_COUNT;
+                       goto out_unmap;
+               }
+@@ -1275,9 +1275,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+                * enough young pte to justify collapsing the page
+                */
+               if (cc->is_khugepaged &&
+-                  (pte_young(pteval) || page_is_young(page) ||
+-                   PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+-                                                                   address)))
++                  (pte_young(pteval) || folio_test_young(folio) ||
++                   folio_test_referenced(folio) ||
++                   mmu_notifier_test_young(vma->vm_mm, _address)))
+                       referenced++;
+       }
+       if (!writable) {
+@@ -1298,7 +1298,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+               *mmap_locked = false;
+       }
+ out:
+-      trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
++      trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
+                                    none_or_zero, result, unmapped);
+       return result;
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 0f706ee04bafdf..482c2b6039f0df 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -2346,10 +2346,9 @@ int unpoison_memory(unsigned long pfn)
+       static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                       DEFAULT_RATELIMIT_BURST);
+ 
+-      if (!pfn_valid(pfn))
+-              return -ENXIO;
+-
+-      p = pfn_to_page(pfn);
++      p = pfn_to_online_page(pfn);
++      if (!p)
++              return -EIO;
+       page = compound_head(p);
+ 
+       mutex_lock(&mf_mutex);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 27697b2429c2e1..39e645dfd46c6c 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -3172,7 +3172,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
+ #endif /* BUILD_EMBED_FIRST_CHUNK */
+ 
+ #ifdef BUILD_PAGE_FIRST_CHUNK
+-#include <asm/pgalloc.h>
++#include <linux/pgalloc.h>
+ 
+ #ifndef P4D_TABLE_SIZE
+ #define P4D_TABLE_SIZE PAGE_SIZE
+@@ -3202,7 +3202,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
+               new = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
+               if (!new)
+                       goto err_alloc;
+-              pgd_populate(&init_mm, pgd, new);
++              pgd_populate_kernel(addr, pgd, new);
+       }
+ 
+       p4d = p4d_offset(pgd, addr);
+@@ -3212,7 +3212,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
+               new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+               if (!new)
+                       goto err_alloc;
+-              p4d_populate(&init_mm, p4d, new);
++              p4d_populate_kernel(addr, p4d, new);
+       }
+ 
+       pud = pud_offset(p4d, addr);
+diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
+index 46ae542118c09a..f89dbaa05eef3e 100644
+--- a/mm/sparse-vmemmap.c
++++ b/mm/sparse-vmemmap.c
+@@ -27,9 +27,9 @@
+ #include <linux/spinlock.h>
+ #include <linux/vmalloc.h>
+ #include <linux/sched.h>
++#include <linux/pgalloc.h>
+ 
+ #include <asm/dma.h>
+-#include <asm/pgalloc.h>
+ 
+ /*
+  * Allocate a block of memory to be used to back the virtual memory map
+@@ -215,7 +215,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              p4d_populate(&init_mm, p4d, p);
++              p4d_populate_kernel(addr, p4d, p);
+       }
+       return p4d;
+ }
+@@ -227,7 +227,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
+               if (!p)
+                       return NULL;
+-              pgd_populate(&init_mm, pgd, p);
++              pgd_populate_kernel(addr, pgd, p);
+       }
+       return pgd;
+ }
+diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
+index 48668790160211..e0b966c2517cf1 100644
+--- a/net/can/j1939/bus.c
++++ b/net/can/j1939/bus.c
+@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
+       if (!ecu)
+               ecu = j1939_ecu_create_locked(priv, name);
+       err = PTR_ERR_OR_ZERO(ecu);
+-      if (err)
++      if (err) {
++              if (j1939_address_is_unicast(sa))
++                      priv->ents[sa].nusers--;
+               goto done;
++      }
+ 
+       ecu->nusers++;
+       /* TODO: do we care if ecu->addr != sa? */
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 0a4267a24263ba..502975fd5f97ee 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -520,6 +520,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+       ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+       if (ret) {
+               j1939_netdev_stop(priv);
++              jsk->priv = NULL;
++              synchronize_rcu();
++              j1939_priv_put(priv);
+               goto out_release_sock;
+       }
+ 
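Both j1939 hunks repair error paths that leaked a provisional reference: bus.c drops the nusers count it had already taken when ECU creation fails, and socket.c detaches jsk->priv and puts the priv reference when j1939_local_ecu_get() fails during bind. Reduced to its shape, hedged and with hypothetical types:

    struct addr_ent { int nusers; };
    struct bus_priv { struct addr_ent ents[256]; };

    static int create_ecu(struct bus_priv *priv, u8 sa);  /* fallible step */

    static int claim_address(struct bus_priv *priv, u8 sa)
    {
            priv->ents[sa].nusers++;            /* provisional claim */
            if (create_ecu(priv, sa)) {
                    priv->ents[sa].nusers--;    /* error path must unwind */
                    return -ENOMEM;
            }
            return 0;
    }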
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index b9b64a2427caf2..db2794d50bdb90 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1453,7 +1453,7 @@ static void con_fault_finish(struct ceph_connection *con)
+        * in case we faulted due to authentication, invalidate our
+        * current tickets so that we can get new ones.
+        */
+-      if (con->v1.auth_retry) {
++      if (!ceph_msgr2(from_msgr(con->msgr)) && con->v1.auth_retry) {
+               dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
+               if (con->ops->invalidate_authorizer)
+                       con->ops->invalidate_authorizer(con);
+@@ -1643,9 +1643,10 @@ static void clear_standby(struct ceph_connection *con)
+ {
+       /* come back from STANDBY? */
+       if (con->state == CEPH_CON_S_STANDBY) {
+-              dout("clear_standby %p and ++connect_seq\n", con);
++              dout("clear_standby %p\n", con);
+               con->state = CEPH_CON_S_PREOPEN;
+-              con->v1.connect_seq++;
++              if (!ceph_msgr2(from_msgr(con->msgr)))
++                      con->v1.connect_seq++;
+               WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
+               WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
+       }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d8c0650322ea65..d4de3c01bdb679 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1985,6 +1985,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+  */
+ static inline void sock_lock_init(struct sock *sk)
+ {
++      sk_owner_clear(sk);
++
+       if (sk->sk_kern_sock)
+               sock_lock_init_class_and_name(
+                       sk,
+@@ -2080,6 +2082,9 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
+       cgroup_sk_free(&sk->sk_cgrp_data);
+       mem_cgroup_sk_free(sk);
+       security_sk_free(sk);
++
++      sk_owner_put(sk);
++
+       if (slab != NULL)
+               kmem_cache_free(slab, sk);
+       else
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 6e434af189bc0f..0b23d52b8d87a1 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -59,7 +59,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
+ 
+       ASSERT_RTNL();
+ 
+-      hsr_for_each_port(master->hsr, port) {
++      hsr_for_each_port_rtnl(master->hsr, port) {
+               if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
+                       netif_carrier_on(master->dev);
+                       return true;
+@@ -109,7 +109,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
+       struct hsr_port *port;
+ 
+       mtu_max = ETH_DATA_LEN;
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               if (port->type != HSR_PT_MASTER)
+                       mtu_max = min(port->dev->mtu, mtu_max);
+ 
+@@ -144,7 +144,7 @@ static int hsr_dev_open(struct net_device *dev)
+       hsr = netdev_priv(dev);
+       designation = '\0';
+ 
+-      hsr_for_each_port(hsr, port) {
++      hsr_for_each_port_rtnl(hsr, port) {
+               if (port->type == HSR_PT_MASTER)
+                       continue;
+               switch (port->type) {
+@@ -170,7 +170,24 @@ static int hsr_dev_open(struct net_device *dev)
+ 
+ static int hsr_dev_close(struct net_device *dev)
+ {
+-      /* Nothing to do here. */
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER)
++                      continue;
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      dev_uc_unsync(port->dev, dev);
++                      dev_mc_unsync(port->dev, dev);
++                      break;
++              default:
++                      break;
++              }
++      }
++
+       return 0;
+ }
+ 
+@@ -190,7 +207,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+        * may become enabled.
+        */
+       features &= ~NETIF_F_ONE_FOR_ALL;
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               features = netdev_increment_features(features,
+                                                    port->dev->features,
+                                                    mask);
+@@ -211,6 +228,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+       struct hsr_priv *hsr = netdev_priv(dev);
+       struct hsr_port *master;
+ 
++      rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       if (master) {
+               skb->dev = master->dev;
+@@ -223,6 +241,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+               dev_core_stats_tx_dropped_inc(dev);
+               dev_kfree_skb_any(skb);
+       }
++      rcu_read_unlock();
++
+       return NETDEV_TX_OK;
+ }
+ 
+@@ -401,12 +421,133 @@ void hsr_del_ports(struct hsr_priv *hsr)
+               hsr_del_port(port);
+ }
+ 
++static void hsr_set_rx_mode(struct net_device *dev)
++{
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER)
++                      continue;
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      dev_mc_sync_multiple(port->dev, dev);
++                      dev_uc_sync_multiple(port->dev, dev);
++                      break;
++              default:
++                      break;
++              }
++      }
++}
++
++static void hsr_change_rx_flags(struct net_device *dev, int change)
++{
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER)
++                      continue;
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      if (change & IFF_ALLMULTI)
++                              dev_set_allmulti(port->dev,
++                                               dev->flags &
++                                               IFF_ALLMULTI ? 1 : -1);
++                      break;
++              default:
++                      break;
++              }
++      }
++}
++
++static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
++                                 __be16 proto, u16 vid)
++{
++      bool is_slave_a_added = false;
++      bool is_slave_b_added = false;
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++      int ret = 0;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              if (port->type == HSR_PT_MASTER ||
++                  port->type == HSR_PT_INTERLINK)
++                      continue;
++
++              ret = vlan_vid_add(port->dev, proto, vid);
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++                      if (ret) {
++                              /* clean up Slave-B */
++                              netdev_err(dev, "add vid failed for Slave-A\n");
++                              if (is_slave_b_added)
++                                      vlan_vid_del(port->dev, proto, vid);
++                              return ret;
++                      }
++
++                      is_slave_a_added = true;
++                      break;
++
++              case HSR_PT_SLAVE_B:
++                      if (ret) {
++                              /* clean up Slave-A */
++                              netdev_err(dev, "add vid failed for Slave-B\n");
++                              if (is_slave_a_added)
++                                      vlan_vid_del(port->dev, proto, vid);
++                              return ret;
++                      }
++
++                      is_slave_b_added = true;
++                      break;
++              default:
++                      break;
++              }
++      }
++
++      return 0;
++}
++
++static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
++                                  __be16 proto, u16 vid)
++{
++      struct hsr_port *port;
++      struct hsr_priv *hsr;
++
++      hsr = netdev_priv(dev);
++
++      hsr_for_each_port_rtnl(hsr, port) {
++              switch (port->type) {
++              case HSR_PT_SLAVE_A:
++              case HSR_PT_SLAVE_B:
++                      vlan_vid_del(port->dev, proto, vid);
++                      break;
++              default:
++                      break;
++              }
++      }
++
++      return 0;
++}
++
+ static const struct net_device_ops hsr_device_ops = {
+       .ndo_change_mtu = hsr_dev_change_mtu,
+       .ndo_open = hsr_dev_open,
+       .ndo_stop = hsr_dev_close,
+       .ndo_start_xmit = hsr_dev_xmit,
++      .ndo_change_rx_flags = hsr_change_rx_flags,
+       .ndo_fix_features = hsr_fix_features,
++      .ndo_set_rx_mode = hsr_set_rx_mode,
++      .ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
++      .ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
+ };
+ 
+ static struct device_type hsr_type = {
+@@ -447,7 +588,8 @@ void hsr_dev_setup(struct net_device *dev)
+ 
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+-                         NETIF_F_HW_VLAN_CTAG_TX;
++                         NETIF_F_HW_VLAN_CTAG_TX |
++                         NETIF_F_HW_VLAN_CTAG_FILTER;
+ 
+       dev->features = dev->hw_features;
+ 
+@@ -529,6 +671,15 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
+       if (res)
+               goto err_add_master;
+ 
++      /* HSR forwarding offload supported in lower device? */
++      if ((slave[0]->features & NETIF_F_HW_HSR_FWD) &&
++          (slave[1]->features & NETIF_F_HW_HSR_FWD))
++              hsr->fwd_offloaded = true;
++
++      if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
++          (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++              hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
++
+       res = register_netdevice(hsr_dev);
+       if (res)
+               goto err_unregister;
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 257b50124cee5e..76a1958609e291 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
+ {
+       struct hsr_port *port;
+ 
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               if (port->type != HSR_PT_MASTER)
+                       return false;
+       return true;
+@@ -125,7 +125,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
+ {
+       struct hsr_port *port;
+ 
+-      hsr_for_each_port(hsr, port)
++      hsr_for_each_port_rtnl(hsr, port)
+               if (port->type == pt)
+                       return port;
+       return NULL;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 58a5a8b3891ffd..8c011b113411f1 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -202,6 +202,7 @@ struct hsr_priv {
+       u8 net_id;              /* for PRP, it occupies most significant 3 bits
+                                * of lan_id
+                                */
++      bool fwd_offloaded;     /* Forwarding offloaded to HW */
+       unsigned char           sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
+                               /* Align to u16 boundary to avoid unaligned access
+                                * in ether_addr_equal
+@@ -214,6 +215,9 @@ struct hsr_priv {
+ #define hsr_for_each_port(hsr, port) \
+       list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+ 
++#define hsr_for_each_port_rtnl(hsr, port) \
++      list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
++
+ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+ 
+ /* Caller must ensure skb is a valid HSR frame */
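hsr_for_each_port_rtnl() relies on the optional cond argument of list_for_each_entry_rcu(): passing lockdep_rtnl_is_held() tells RCU-lockdep that holding RTNL is as valid as a read-side critical section for this list, so control-path walkers stop producing false splats without taking rcu_read_lock(). A hedged usage sketch:

    /* Control path: caller holds rtnl_lock(), no rcu_read_lock() needed. */
    static struct hsr_port *first_slave_port(struct hsr_priv *hsr)
    {
            struct hsr_port *port;

            ASSERT_RTNL();
            hsr_for_each_port_rtnl(hsr, port)
                    if (port->type != HSR_PT_MASTER)
                            return port;
            return NULL;
    }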
+diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
+index 0e6daee488b4f1..b8230faa567f77 100644
+--- a/net/hsr/hsr_slave.c
++++ b/net/hsr/hsr_slave.c
+@@ -137,9 +137,14 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+       struct hsr_port *master;
+       int res;
+ 
+-      res = dev_set_promiscuity(dev, 1);
+-      if (res)
+-              return res;
++      /* Don't use promiscuous mode for offload since L2 frame forward
++       * happens at the offloaded hardware.
++       */
++      if (!port->hsr->fwd_offloaded) {
++              res = dev_set_promiscuity(dev, 1);
++              if (res)
++                      return res;
++      }
+ 
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       hsr_dev = master->dev;
+@@ -158,7 +163,9 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ fail_rx_handler:
+       netdev_upper_dev_unlink(dev, hsr_dev);
+ fail_upper_dev_link:
+-      dev_set_promiscuity(dev, -1);
++      if (!port->hsr->fwd_offloaded)
++              dev_set_promiscuity(dev, -1);
++
+       return res;
+ }
+ 
+@@ -219,7 +226,8 @@ void hsr_del_port(struct hsr_port *port)
+               netdev_update_features(master->dev);
+               dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               netdev_rx_handler_unregister(port->dev);
+-              dev_set_promiscuity(port->dev, -1);
++              if (!port->hsr->fwd_offloaded)
++                      dev_set_promiscuity(port->dev, -1);
+               netdev_upper_dev_unlink(port->dev, master->dev);
+       }
+ 
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index deb08cab44640d..75e3d7501752df 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -203,6 +203,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+               return -EINVAL;
+ 
++      if (skb_is_gso(skb))
++              skb_gso_reset(skb);
++
+       skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+       pskb_pull(skb, ETH_HLEN);
+       skb_reset_network_header(skb);
+@@ -297,6 +300,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
+       if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+               return -EINVAL;
+ 
++      if (skb_is_gso(skb))
++              skb_gso_reset(skb);
++
+       skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
+       pskb_pull(skb, ETH_HLEN);
+       skb_reset_network_header(skb);
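Both iptunnel hunks clear stale GSO state before the skb is rebuilt in place as an ICMP or ICMPv6 error: the segmentation metadata described the oversized payload being rejected, not the small error packet about to be constructed, and leaving it set can confuse later GSO handling. The guard in isolation (hedged):

    static void prepare_for_icmp_error(struct sk_buff *skb)
    {
            /* Old GSO metadata refers to the dropped payload, not the
             * ICMP frame we are about to build in its place. */
            if (skb_is_gso(skb))
                    skb_gso_reset(skb);
    }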
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index bf10fa3c37b76f..1727ac094e1064 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -403,8 +403,11 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+               if (!psock->cork) {
+                       psock->cork = kzalloc(sizeof(*psock->cork),
+                                             GFP_ATOMIC | __GFP_NOWARN);
+-                      if (!psock->cork)
++                      if (!psock->cork) {
++                              sk_msg_free(sk, msg);
++                              *copied = 0;
+                               return -ENOMEM;
++                      }
+               }
+               memcpy(psock->cork, msg, sizeof(*msg));
+               return 0;
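The tcp_bpf fix enforces an invariant of sk_msg send paths: on failure, nothing may keep charging the socket for data that will never be sent, so the message is freed and *copied zeroed before -ENOMEM is returned. Condensed to its shape (hedged, not the kernel function):

    static int cork_or_fail(struct sock *sk, struct sk_psock *psock,
                            struct sk_msg *msg, int *copied)
    {
            psock->cork = kzalloc(sizeof(*psock->cork),
                                  GFP_ATOMIC | __GFP_NOWARN);
            if (!psock->cork) {
                    sk_msg_free(sk, msg);   /* uncharge and drop pages */
                    *copied = 0;            /* caller reports no progress */
                    return -ENOMEM;
            }
            memcpy(psock->cork, msg, sizeof(*msg));
            return 0;
    }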
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 129c9e9ee396cd..341fca0a4bf4d9 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -1347,13 +1347,12 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
+ {
+       static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
+       struct sock *sk = (struct sock *)msk;
++      bool keep_open;
+ 
+-      if (ssk->sk_prot->keepalive) {
+-              if (sock_flag(sk, SOCK_KEEPOPEN))
+-                      ssk->sk_prot->keepalive(ssk, 1);
+-              else
+-                      ssk->sk_prot->keepalive(ssk, 0);
+-      }
++      keep_open = sock_flag(sk, SOCK_KEEPOPEN);
++      if (ssk->sk_prot->keepalive)
++              ssk->sk_prot->keepalive(ssk, keep_open);
++      sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
+ 
+       ssk->sk_priority = sk->sk_priority;
+       ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 73bc39281ef5f5..9b45fbdc90cabe 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
+ 
+ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
+ {
+-      if (unlikely(current->flags & PF_EXITING))
+-              return -EINTR;
+       schedule();
+       if (signal_pending_state(mode, current))
+               return -ERESTARTSYS;
+diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
+index 39146fa83e20b3..cbef49649ce01d 100644
+--- a/samples/ftrace/ftrace-direct-modify.c
++++ b/samples/ftrace/ftrace-direct-modify.c
+@@ -36,8 +36,8 @@ asm (
+ "     movq %rsp, %rbp\n"
+ "     call my_direct_func1\n"
+ "     leave\n"
+-"     .size           my_tramp1, .-my_tramp1\n"
+       ASM_RET
++"     .size           my_tramp1, .-my_tramp1\n"
+ 
+ "     .type           my_tramp2, @function\n"
+ "     .globl          my_tramp2\n"
+diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
+index de99c6920df87f..8501e8404b9228 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
+@@ -52,6 +52,7 @@ struct q6apm_dai_rtd {
+       uint16_t bits_per_sample;
+       uint16_t source; /* Encoding source bit mask */
+       uint16_t session_id;
++      snd_pcm_uframes_t queue_ptr;
+       enum stream_state state;
+       struct q6apm_graph *graph;
+       spinlock_t lock;
+@@ -114,8 +115,6 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo
+               prtd->pos += prtd->pcm_count;
+               spin_unlock_irqrestore(&prtd->lock, flags);
+               snd_pcm_period_elapsed(substream);
+-              if (prtd->state == Q6APM_STREAM_RUNNING)
+-                      q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
+ 
+               break;
+       case APM_CLIENT_EVENT_DATA_READ_DONE:
+@@ -209,6 +208,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
+       return 0;
+ }
+ 
++static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream)
++{
++      struct snd_pcm_runtime *runtime = substream->runtime;
++      struct q6apm_dai_rtd *prtd = runtime->private_data;
++      int i, ret = 0, avail_periods;
++
++      if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++              avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size;
++              for (i = 0; i < avail_periods; i++) {
++                      ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
++                      if (ret < 0) {
++                              dev_err(component->dev, "Error queuing playback buffer %d\n", ret);
++                              return ret;
++                      }
++                      prtd->queue_ptr += runtime->period_size;
++              }
++      }
++
++      return ret;
++}
++
+ static int q6apm_dai_trigger(struct snd_soc_component *component,
+                            struct snd_pcm_substream *substream, int cmd)
+ {
+@@ -220,9 +240,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component,
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-               /* start writing buffers for playback only as we already queued capture buffers */
+-              if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+-                      ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
+               break;
+       case SNDRV_PCM_TRIGGER_STOP:
+               /* TODO support be handled via SoftPause Module */
+@@ -396,6 +413,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
+       .hw_params      = q6apm_dai_hw_params,
+       .pointer        = q6apm_dai_pointer,
+       .trigger        = q6apm_dai_trigger,
++      .ack            = q6apm_dai_ack,
+ };
+ 
+ static int q6apm_dai_probe(struct platform_device *pdev)
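Moving buffer queueing out of the trigger and completion paths into the new .ack callback ties DSP writes to actual userspace progress: each time the application commits data, q6apm_dai_ack() queues exactly the full periods sitting between appl_ptr and what was already handed to the DSP. Worked example with assumed values: for period_size = 1024 frames, queue_ptr = 1024 and appl_ptr = 4096, avail_periods = (4096 - 1024) / 1024 = 3, so three q6apm_write_async() calls are issued and queue_ptr advances to 4096.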
