commit:     69a6caa7fa8e6663f49ee5f2b65074b947de292a
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 22 13:03:42 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 22 13:03:42 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=69a6caa7

Linux patch 5.15.95

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1094_linux-5.15.95.patch | 3177 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3181 insertions(+)

diff --git a/0000_README b/0000_README
index 4eb378f3..4a45faee 100644
--- a/0000_README
+++ b/0000_README
@@ -419,6 +419,10 @@ Patch:  1093_linux-5.15.94.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.15.94
 
+Patch:  1094_linux-5.15.95.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.15.95
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1094_linux-5.15.95.patch b/1094_linux-5.15.95.patch
new file mode 100644
index 00000000..180480c3
--- /dev/null
+++ b/1094_linux-5.15.95.patch
@@ -0,0 +1,3177 @@
+diff --git a/Makefile b/Makefile
+index fcee25420bf9d..e367784df9ba5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 94
++SUBLEVEL = 95
+ EXTRAVERSION =
+ NAME = Trick or Treat
+ 
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index 9d8634e2f12f7..9bcae72dda440 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -11,6 +11,8 @@
+  * Copyright (C) 2007 Marvell Ltd.
+  */
+ 
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/kernel.h>
+ #include <linux/kprobes.h>
+ #include <linux/module.h>
+@@ -278,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
+                               break;
+                       case KPROBE_REENTER:
+                               /* A nested probe was hit in FIQ, it is a BUG */
+-                              pr_warn("Unrecoverable kprobe detected.\n");
++                              pr_warn("Failed to recover from reentered 
kprobes.\n");
+                               dump_kprobe(p);
+                               fallthrough;
+                       default:
+diff --git a/arch/arm64/kernel/probes/kprobes.c 
b/arch/arm64/kernel/probes/kprobes.c
+index b7404dba0d623..2162b6fd7251d 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -7,6 +7,9 @@
+  * Copyright (C) 2013 Linaro Limited.
+  * Author: Sandeepa Prabhu <[email protected]>
+  */
++
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/extable.h>
+ #include <linux/kasan.h>
+ #include <linux/kernel.h>
+@@ -218,7 +221,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
+               break;
+       case KPROBE_HIT_SS:
+       case KPROBE_REENTER:
+-              pr_warn("Unrecoverable kprobe detected.\n");
++              pr_warn("Failed to recover from reentered kprobes.\n");
+               dump_kprobe(p);
+               BUG();
+               break;
+diff --git a/arch/csky/kernel/probes/kprobes.c 
b/arch/csky/kernel/probes/kprobes.c
+index 584ed9f36290f..bd92ac376e157 100644
+--- a/arch/csky/kernel/probes/kprobes.c
++++ b/arch/csky/kernel/probes/kprobes.c
+@@ -1,5 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0+
+ 
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/kprobes.h>
+ #include <linux/extable.h>
+ #include <linux/slab.h>
+@@ -77,10 +79,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ {
+       unsigned long probe_addr = (unsigned long)p->addr;
+ 
+-      if (probe_addr & 0x1) {
+-              pr_warn("Address not aligned.\n");
+-              return -EINVAL;
+-      }
++      if (probe_addr & 0x1)
++              return -EILSEQ;
+ 
+       /* copy instruction */
+       p->opcode = le32_to_cpu(*p->addr);
+@@ -229,7 +229,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
+               break;
+       case KPROBE_HIT_SS:
+       case KPROBE_REENTER:
+-              pr_warn("Unrecoverable kprobe detected.\n");
++              pr_warn("Failed to recover from reentered kprobes.\n");
+               dump_kprobe(p);
+               BUG();
+               break;
+diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
+index 75bff0f773198..b0934a0d7aedd 100644
+--- a/arch/mips/kernel/kprobes.c
++++ b/arch/mips/kernel/kprobes.c
+@@ -11,6 +11,8 @@
+  *   Copyright (C) IBM Corporation, 2002, 2004
+  */
+ 
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/kprobes.h>
+ #include <linux/preempt.h>
+ #include <linux/uaccess.h>
+@@ -80,8 +82,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+       insn = p->addr[0];
+ 
+       if (insn_has_ll_or_sc(insn)) {
+-              pr_notice("Kprobes for ll and sc instructions are not"
+-                        "supported\n");
++              pr_notice("Kprobes for ll and sc instructions are not 
supported\n");
+               ret = -EINVAL;
+               goto out;
+       }
+@@ -219,7 +220,7 @@ static int evaluate_branch_instruction(struct kprobe *p, 
struct pt_regs *regs,
+       return 0;
+ 
+ unaligned:
+-      pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
++      pr_notice("Failed to emulate branch instruction because of unaligned 
epc - sending SIGBUS to %s.\n", current->comm);
+       force_sig(SIGBUS);
+       return -EFAULT;
+ 
+@@ -238,10 +239,8 @@ static void prepare_singlestep(struct kprobe *p, struct 
pt_regs *regs,
+               regs->cp0_epc = (unsigned long)p->addr;
+       else if (insn_has_delayslot(p->opcode)) {
+               ret = evaluate_branch_instruction(p, regs, kcb);
+-              if (ret < 0) {
+-                      pr_notice("Kprobes: Error in evaluating branch\n");
++              if (ret < 0)
+                       return;
+-              }
+       }
+       regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+ }
+diff --git a/arch/riscv/kernel/probes/kprobes.c 
b/arch/riscv/kernel/probes/kprobes.c
+index 125241ce82d6a..7548b1d62509c 100644
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -1,5 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0+
+ 
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/kprobes.h>
+ #include <linux/extable.h>
+ #include <linux/slab.h>
+@@ -63,19 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p)
+ 
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ {
+-      unsigned long probe_addr = (unsigned long)p->addr;
+-
+-      if (probe_addr & 0x1) {
+-              pr_warn("Address not aligned.\n");
++      u16 *insn = (u16 *)p->addr;
+ 
+-              return -EINVAL;
+-      }
++      if ((unsigned long)insn & 0x1)
++              return -EILSEQ;
+ 
+       if (!arch_check_kprobe(p))
+               return -EILSEQ;
+ 
+       /* copy instruction */
+-      p->opcode = *p->addr;
++      p->opcode = (kprobe_opcode_t)(*insn++);
++      if (GET_INSN_LENGTH(p->opcode) == 4)
++              p->opcode |= (kprobe_opcode_t)(*insn) << 16;
+ 
+       /* decode instruction */
+       switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
+@@ -209,7 +210,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
+               break;
+       case KPROBE_HIT_SS:
+       case KPROBE_REENTER:
+-              pr_warn("Unrecoverable kprobe detected.\n");
++              pr_warn("Failed to recover from reentered kprobes.\n");
+               dump_kprobe(p);
+               BUG();
+               break;
+diff --git a/arch/s390/boot/compressed/decompressor.c 
b/arch/s390/boot/compressed/decompressor.c
+index e27c2140d6206..623f6775d01d7 100644
+--- a/arch/s390/boot/compressed/decompressor.c
++++ b/arch/s390/boot/compressed/decompressor.c
+@@ -80,6 +80,6 @@ void *decompress_kernel(void)
+       void *output = (void *)decompress_offset;
+ 
+       __decompress(_compressed_start, _compressed_end - _compressed_start,
+-                   NULL, NULL, output, 0, NULL, error);
++                   NULL, NULL, output, vmlinux.image_size, NULL, error);
+       return output;
+ }
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index 52d056a5f89fc..952d44b0610b0 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -7,6 +7,8 @@
+  * s390 port, used ppc64 as template. Mike Grundy <[email protected]>
+  */
+ 
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/moduleloader.h>
+ #include <linux/kprobes.h>
+ #include <linux/ptrace.h>
+@@ -259,7 +261,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk 
*kcb, struct kprobe *p)
+                * is a BUG. The code path resides in the .kprobes.text
+                * section and is executed with interrupts disabled.
+                */
+-              pr_err("Invalid kprobe detected.\n");
++              pr_err("Failed to recover from reentered kprobes.\n");
+               dump_kprobe(p);
+               BUG();
+       }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 45a3d11bb70d9..75c8f66cce4f6 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4821,12 +4821,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct 
kvm_vcpu *vcpu,
+ {
+       unsigned long val;
+ 
++      memset(dbgregs, 0, sizeof(*dbgregs));
+       memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+       kvm_get_dr(vcpu, 6, &val);
+       dbgregs->dr6 = val;
+       dbgregs->dr7 = vcpu->arch.dr7;
+-      dbgregs->flags = 0;
+-      memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
+ }
+ 
+ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index 2af1ae1721021..4a11a38764321 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -86,6 +86,8 @@ struct lpi_device_constraint_amd {
+       int min_dstate;
+ };
+ 
++static LIST_HEAD(lps0_s2idle_devops_head);
++
+ static struct lpi_constraints *lpi_constraints_table;
+ static int lpi_constraints_table_size;
+ static int rev_id;
+@@ -434,6 +436,8 @@ static struct acpi_scan_handler lps0_handler = {
+ 
+ int acpi_s2idle_prepare_late(void)
+ {
++      struct acpi_s2idle_dev_ops *handler;
++
+       if (!lps0_device_handle || sleep_no_lps0)
+               return 0;
+ 
+@@ -464,14 +468,26 @@ int acpi_s2idle_prepare_late(void)
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
+                               lps0_dsm_func_mask_microsoft, 
lps0_dsm_guid_microsoft);
+       }
++
++      list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
++              if (handler->prepare)
++                      handler->prepare();
++      }
++
+       return 0;
+ }
+ 
+ void acpi_s2idle_restore_early(void)
+ {
++      struct acpi_s2idle_dev_ops *handler;
++
+       if (!lps0_device_handle || sleep_no_lps0)
+               return;
+ 
++      list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node)
++              if (handler->restore)
++                      handler->restore();
++
+       /* Modern standby exit */
+       if (lps0_dsm_func_mask_microsoft > 0)
+               acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
+@@ -514,4 +530,28 @@ void acpi_s2idle_setup(void)
+       s2idle_set_ops(&acpi_s2idle_ops_lps0);
+ }
+ 
++int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
++{
++      if (!lps0_device_handle || sleep_no_lps0)
++              return -ENODEV;
++
++      lock_system_sleep();
++      list_add(&arg->list_node, &lps0_s2idle_devops_head);
++      unlock_system_sleep();
++
++      return 0;
++}
++EXPORT_SYMBOL_GPL(acpi_register_lps0_dev);
++
++void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
++{
++      if (!lps0_device_handle || sleep_no_lps0)
++              return;
++
++      lock_system_sleep();
++      list_del(&arg->list_node);
++      unlock_system_sleep();
++}
++EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev);
++
+ #endif /* CONFIG_SUSPEND */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index e9797439bb0eb..b4293b5a82526 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4428,6 +4428,17 @@ DEVICE_ATTR_WO(s3_debug);
+ static int dm_early_init(void *handle)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++      struct amdgpu_mode_info *mode_info = &adev->mode_info;
++      struct atom_context *ctx = mode_info->atom_context;
++      int index = GetIndexIntoMasterTable(DATA, Object_Header);
++      u16 data_offset;
++
++      /* if there is no object header, skip DM */
++      if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, 
&data_offset)) {
++              adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
++              dev_info(adev->dev, "No object header, skipping DM\n");
++              return -ENOENT;
++      }
+ 
+       switch (adev->asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC_SI)
+@@ -10878,7 +10889,11 @@ static int amdgpu_dm_atomic_check(struct drm_device 
*dev,
+        * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+        * atomic state, so call drm helper to normalize zpos.
+        */
+-      drm_atomic_normalize_zpos(dev, state);
++      ret = drm_atomic_normalize_zpos(dev, state);
++      if (ret) {
++              drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
++              goto fail;
++      }
+ 
+       /* Remove exiting planes if they are modified */
+       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, 
new_plane_state, i) {
+diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c 
b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+index 6b5ab19a2ada9..de93a1e988f29 100644
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -1049,6 +1049,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, 
struct i915_wa_list *wal)
+                   GAMT_CHKN_BIT_REG,
+                   GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ 
++      /*
++       * Wa_1408615072:icl,ehl  (vsunit)
++       * Wa_1407596294:icl,ehl  (hsunit)
++       */
++      wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
++                  VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
++
++      /* Wa_1407352427:icl,ehl */
++      wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
++                  PSDUNIT_CLKGATE_DIS);
++
++      /* Wa_1406680159:icl,ehl */
++      wa_write_or(wal,
++                  SUBSLICE_UNIT_LEVEL_CLKGATE,
++                  GWUNIT_CLKGATE_DIS);
++
+       /* Wa_1607087056:icl,ehl,jsl */
+       if (IS_ICELAKE(i915) ||
+           IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
+@@ -1738,22 +1754,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, 
struct i915_wa_list *wal)
+               wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+                            GEN11_ENABLE_32_PLANE_MODE);
+ 
+-              /*
+-               * Wa_1408615072:icl,ehl  (vsunit)
+-               * Wa_1407596294:icl,ehl  (hsunit)
+-               */
+-              wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+-                          VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+-
+-              /* Wa_1407352427:icl,ehl */
+-              wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+-                          PSDUNIT_CLKGATE_DIS);
+-
+-              /* Wa_1406680159:icl,ehl */
+-              wa_write_or(wal,
+-                          SUBSLICE_UNIT_LEVEL_CLKGATE,
+-                          GWUNIT_CLKGATE_DIS);
+-
+               /*
+                * Wa_1408767742:icl[a2..forever],ehl[all]
+                * Wa_1605460711:icl[a0..c0]
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+index 634f64f88fc8b..81a1ad2c88a7e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, 
u32 freq)
+       return ret;
+ }
+ 
++static int
++tu102_devinit_wait(struct nvkm_device *device)
++{
++      unsigned timeout = 50 + 2000;
++
++      do {
++              if (nvkm_rd32(device, 0x118128) & 0x00000001) {
++                      if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
++                              return 0;
++              }
++
++              usleep_range(1000, 2000);
++      } while (timeout--);
++
++      return -ETIMEDOUT;
++}
++
+ int
+ tu102_devinit_post(struct nvkm_devinit *base, bool post)
+ {
+       struct nv50_devinit *init = nv50_devinit(base);
++      int ret;
++
++      ret = tu102_devinit_wait(init->base.subdev.device);
++      if (ret)
++              return ret;
++
+       gm200_devinit_preos(init, post);
+       return 0;
+ }
+diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
+index cac9e0f133205..f6cdec00e97e7 100644
+--- a/drivers/mmc/core/sdio_bus.c
++++ b/drivers/mmc/core/sdio_bus.c
+@@ -293,6 +293,12 @@ static void sdio_release_func(struct device *dev)
+       if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
+               sdio_free_func_cis(func);
+ 
++      /*
++       * We have now removed the link to the tuples in the
++       * card structure, so remove the reference.
++       */
++      put_device(&func->card->dev);
++
+       kfree(func->info);
+       kfree(func->tmpbuf);
+       kfree(func);
+@@ -323,6 +329,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
+ 
+       device_initialize(&func->dev);
+ 
++      /*
++       * We may link to tuples in the card structure,
++       * we need make sure we have a reference to it.
++       */
++      get_device(&func->card->dev);
++
+       func->dev.parent = &card->dev;
+       func->dev.bus = &sdio_bus_type;
+       func->dev.release = sdio_release_func;
+@@ -376,10 +388,9 @@ int sdio_add_func(struct sdio_func *func)
+  */
+ void sdio_remove_func(struct sdio_func *func)
+ {
+-      if (!sdio_func_present(func))
+-              return;
++      if (sdio_func_present(func))
++              device_del(&func->dev);
+ 
+-      device_del(&func->dev);
+       of_node_put(func->dev.of_node);
+       put_device(&func->dev);
+ }
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index a705ba6eff5bf..afaa6cab1adc3 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -403,12 +403,6 @@ int sdio_read_func_cis(struct sdio_func *func)
+       if (ret)
+               return ret;
+ 
+-      /*
+-       * Since we've linked to tuples in the card structure,
+-       * we must make sure we have a reference to it.
+-       */
+-      get_device(&func->card->dev);
+-
+       /*
+        * Vendor/device id is optional for function CIS, so
+        * copy it from the card structure as needed.
+@@ -434,11 +428,5 @@ void sdio_free_func_cis(struct sdio_func *func)
+       }
+ 
+       func->tuples = NULL;
+-
+-      /*
+-       * We have now removed the link to the tuples in the
+-       * card structure, so remove the reference.
+-       */
+-      put_device(&func->card->dev);
+ }
+ 
+diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
+index 3c59dec08c3bd..8586447d4b4f2 100644
+--- a/drivers/mmc/host/jz4740_mmc.c
++++ b/drivers/mmc/host/jz4740_mmc.c
+@@ -1038,6 +1038,16 @@ static int jz4740_mmc_probe(struct platform_device* 
pdev)
+       mmc->ops = &jz4740_mmc_ops;
+       if (!mmc->f_max)
+               mmc->f_max = JZ_MMC_CLK_RATE;
++
++      /*
++       * There seems to be a problem with this driver on the JZ4760 and
++       * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
++       * the communication fails with many SD cards.
++       * Until this bug is sorted out, limit the maximum rate to 24 MHz.
++       */
++      if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
++              mmc->f_max = JZ_MMC_CLK_RATE;
++
+       mmc->f_min = mmc->f_max / 128;
+       mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ 
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index b431cdd27353b..91fde4943defa 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1441,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 
+       status = mmc_add_host(mmc);
+       if (status != 0)
+-              goto fail_add_host;
++              goto fail_glue_init;
+ 
+       /*
+        * Index 0 is card detect
+@@ -1449,7 +1449,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+        */
+       status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
+       if (status == -EPROBE_DEFER)
+-              goto fail_add_host;
++              goto fail_gpiod_request;
+       if (!status) {
+               /*
+                * The platform has a CD GPIO signal that may support
+@@ -1464,7 +1464,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+       /* Index 1 is write protect/read only */
+       status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
+       if (status == -EPROBE_DEFER)
+-              goto fail_add_host;
++              goto fail_gpiod_request;
+       if (!status)
+               has_ro = true;
+ 
+@@ -1478,7 +1478,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+                               ? ", cd polling" : "");
+       return 0;
+ 
+-fail_add_host:
++fail_gpiod_request:
+       mmc_remove_host(mmc);
+ fail_glue_init:
+       mmc_spi_dma_free(host);
+diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c 
b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+index 0ce28bc955a4a..92453e68d381b 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
++++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
+@@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_device *core)
+               bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+               bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+               bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+-              if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+-                  ci->pkg == BCMA_PKG_ID_BCM47186) {
++              if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 
BCMA_PKG_ID_BCM47186) ||
++                  (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 
BCMA_PKG_ID_BCM47188)) {
+                       bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+                       bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+               }
+-              if (ci->pkg == BCMA_PKG_ID_BCM5358)
++              if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 
BCMA_PKG_ID_BCM5358)
+                       bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
+               break;
+       case BCMA_CHIP_ID_BCM53573:
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 117f5cc7c1804..f64df4d532896 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -9023,10 +9023,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool 
irq_re_init)
+               netdev_err(bp->dev, "ring reservation/IRQ init failure rc: 
%d\n", rc);
+               return rc;
+       }
+-      if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
++      if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
++                  bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
+               netdev_err(bp->dev, "tx ring reservation failure\n");
+               netdev_reset_tc(bp->dev);
+-              bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++              if (bp->tx_nr_rings_xdp)
++                      bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
++              else
++                      bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+               return -ENOMEM;
+       }
+       return 0;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c 
b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index c013d86559af9..5ffcd3cc989f7 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2789,7 +2789,7 @@ static int i40e_change_mtu(struct net_device *netdev, 
int new_mtu)
+       struct i40e_pf *pf = vsi->back;
+ 
+       if (i40e_enabled_xdp_vsi(vsi)) {
+-              int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
++              int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
+ 
+               if (frame_size > i40e_max_xdp_frame_size(vsi))
+                       return -EINVAL;
+@@ -13005,6 +13005,8 @@ static int i40e_ndo_bridge_setlink(struct net_device 
*dev,
+       }
+ 
+       br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
++      if (!br_spec)
++              return -EINVAL;
+ 
+       nla_for_each_nested(attr, br_spec, rem) {
+               __u16 mode;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h 
b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+index c375a5d54b40d..737590a0d849e 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+@@ -67,6 +67,8 @@
+ #define IXGBE_RXBUFFER_4K    4096
+ #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
+ 
++#define IXGBE_PKT_HDR_PAD   (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
++
+ /* Attempt to maximize the headroom available for incoming frames.  We
+  * use a 2K buffer for receives and need 1536/1534 to store the data for
+  * the frame.  This leaves us with 512 bytes of room.  From that we need
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 8cb20af51ecd6..6fb9c18297bc8 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6729,6 +6729,18 @@ static void ixgbe_free_all_rx_resources(struct 
ixgbe_adapter *adapter)
+                       ixgbe_free_rx_resources(adapter->rx_ring[i]);
+ }
+ 
++/**
++ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
++ * @adapter: device handle, pointer to adapter
++ */
++static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
++{
++      if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
++              return IXGBE_RXBUFFER_2K;
++      else
++              return IXGBE_RXBUFFER_3K;
++}
++
+ /**
+  * ixgbe_change_mtu - Change the Maximum Transfer Unit
+  * @netdev: network interface device structure
+@@ -6740,18 +6752,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, 
int new_mtu)
+ {
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ 
+-      if (adapter->xdp_prog) {
+-              int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+-                                   VLAN_HLEN;
+-              int i;
+-
+-              for (i = 0; i < adapter->num_rx_queues; i++) {
+-                      struct ixgbe_ring *ring = adapter->rx_ring[i];
++      if (ixgbe_enabled_xdp_adapter(adapter)) {
++              int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
+ 
+-                      if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+-                              e_warn(probe, "Requested MTU size is not 
supported with XDP\n");
+-                              return -EINVAL;
+-                      }
++              if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
++                      e_warn(probe, "Requested MTU size is not supported with 
XDP\n");
++                      return -EINVAL;
+               }
+       }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index 6b1d9e8879f46..d0c7f22a4e55a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
+       plat_dat->has_gmac4 = 1;
+       plat_dat->pmt = 1;
+       plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
++      if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
++              plat_dat->rx_clk_runs_in_lpi = 1;
+ 
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index 413f660172199..e95d35f1e5a0c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -541,9 +541,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+               return 0;
+       }
+ 
+-      val |= PPSCMDx(index, 0x2);
+       val |= TRGTMODSELx(index, 0x2);
+       val |= PPSEN0;
++      writel(val, ioaddr + MAC_PPS_CONTROL);
+ 
+       writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
+ 
+@@ -568,6 +568,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+       writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
+ 
+       /* Finally, activate it */
++      val |= PPSCMDx(index, 0x2);
+       writel(val, ioaddr + MAC_PPS_CONTROL);
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4191502d6472f..d56f65338ea66 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1174,7 +1174,8 @@ static void stmmac_mac_link_up(struct phylink_config 
*config,
+ 
+       stmmac_mac_set(priv, priv->ioaddr, true);
+       if (phy && priv->dma_cap.eee) {
+-              priv->eee_active = phy_init_eee(phy, 1) >= 0;
++              priv->eee_active =
++                      phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+               priv->eee_enabled = stmmac_eee_init(priv);
+               priv->tx_lpi_enabled = priv->eee_enabled;
+               stmmac_set_eee_pls(priv, priv->hw, true);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 5c234a8158c71..e12df9d99089f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -558,7 +558,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 
*mac)
+       dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+ 
+       plat->force_thresh_dma_mode = of_property_read_bool(np, 
"snps,force_thresh_dma_mode");
+-      if (plat->force_thresh_dma_mode) {
++      if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
+               plat->force_sf_dma_mode = 0;
+               dev_warn(&pdev->dev,
+                        "force_sf_dma_mode is ignored if force_thresh_dma_mode 
is set.\n");
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c 
b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 2298b3c38f896..37b9a798dd624 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -564,7 +564,15 @@ static int am65_cpsw_nuss_common_stop(struct 
am65_cpsw_common *common)
+               k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+       }
+ 
++      reinit_completion(&common->tdown_complete);
+       k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
++
++      if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
++              i = wait_for_completion_timeout(&common->tdown_complete, 
msecs_to_jiffies(1000));
++              if (!i)
++                      dev_err(common->dev, "rx teardown timeout\n");
++      }
++
+       napi_disable(&common->napi_rx);
+ 
+       for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+@@ -786,6 +794,8 @@ static int am65_cpsw_nuss_rx_packets(struct 
am65_cpsw_common *common,
+ 
+       if (cppi5_desc_is_tdcm(desc_dma)) {
+               dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
++              if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
++                      complete(&common->tdown_complete);
+               return 0;
+       }
+ 
+@@ -2609,7 +2619,7 @@ static const struct am65_cpsw_pdata j721e_pdata = {
+ };
+ 
+ static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+-      .quirks = 0,
++      .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
+       .ale_dev_id = "am64-cpswxg",
+       .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ };
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h 
b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+index 048ed10143c17..74569c8ed2eca 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+@@ -84,6 +84,7 @@ struct am65_cpsw_rx_chn {
+ };
+ 
+ #define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
++#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
+ 
+ struct am65_cpsw_pdata {
+       u32     quirks;
+diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
+index fc5895f85cee2..a552bb1665b8a 100644
+--- a/drivers/net/usb/kalmia.c
++++ b/drivers/net/usb/kalmia.c
+@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 
init_msg_len,
+               init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
+       if (status != 0) {
+               netdev_err(dev->net,
+-                      "Error sending init packet. Status %i, length %i\n",
+-                      status, act_len);
++                      "Error sending init packet. Status %i\n",
++                      status);
+               return status;
+       }
+       else if (act_len != init_msg_len) {
+@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 
init_msg_len,
+ 
+       if (status != 0)
+               netdev_err(dev->net,
+-                      "Error receiving init result. Status %i, length %i\n",
+-                      status, act_len);
++                      "Error receiving init result. Status %i\n",
++                      status);
+       else if (act_len != expected_len)
+               netdev_err(dev->net, "Unexpected init result length: %i\n",
+                       act_len);
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index c43bc5e1c7a28..00a2a591f5c1f 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport 
*tgtport,
+               else {
+                       queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+                                       be16_to_cpu(rqst->assoc_cmd.sqsize));
+-                      if (!queue)
++                      if (!queue) {
+                               ret = VERR_QUEUE_ALLOC_FAIL;
++                              nvmet_fc_tgt_a_put(iod->assoc);
++                      }
+               }
+       }
+ 
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index ee86022c4f2b8..47c1487dcf8cc 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -768,14 +768,19 @@ struct nvmem_device *nvmem_register(const struct 
nvmem_config *config)
+ 
+       nvmem->id = rval;
+ 
++      nvmem->dev.type = &nvmem_provider_type;
++      nvmem->dev.bus = &nvmem_bus_type;
++      nvmem->dev.parent = config->dev;
++
++      device_initialize(&nvmem->dev);
++
+       if (!config->ignore_wp)
+               nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
+                                                   GPIOD_OUT_HIGH);
+       if (IS_ERR(nvmem->wp_gpio)) {
+-              ida_free(&nvmem_ida, nvmem->id);
+               rval = PTR_ERR(nvmem->wp_gpio);
+-              kfree(nvmem);
+-              return ERR_PTR(rval);
++              nvmem->wp_gpio = NULL;
++              goto err_put_device;
+       }
+ 
+       kref_init(&nvmem->refcnt);
+@@ -787,9 +792,6 @@ struct nvmem_device *nvmem_register(const struct 
nvmem_config *config)
+       nvmem->stride = config->stride ?: 1;
+       nvmem->word_size = config->word_size ?: 1;
+       nvmem->size = config->size;
+-      nvmem->dev.type = &nvmem_provider_type;
+-      nvmem->dev.bus = &nvmem_bus_type;
+-      nvmem->dev.parent = config->dev;
+       nvmem->root_only = config->root_only;
+       nvmem->priv = config->priv;
+       nvmem->type = config->type;
+@@ -804,18 +806,21 @@ struct nvmem_device *nvmem_register(const struct 
nvmem_config *config)
+ 
+       switch (config->id) {
+       case NVMEM_DEVID_NONE:
+-              dev_set_name(&nvmem->dev, "%s", config->name);
++              rval = dev_set_name(&nvmem->dev, "%s", config->name);
+               break;
+       case NVMEM_DEVID_AUTO:
+-              dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
++              rval = dev_set_name(&nvmem->dev, "%s%d", config->name, 
nvmem->id);
+               break;
+       default:
+-              dev_set_name(&nvmem->dev, "%s%d",
++              rval = dev_set_name(&nvmem->dev, "%s%d",
+                            config->name ? : "nvmem",
+                            config->name ? config->id : nvmem->id);
+               break;
+       }
+ 
++      if (rval)
++              goto err_put_device;
++
+       nvmem->read_only = device_property_present(config->dev, "read-only") ||
+                          config->read_only || !nvmem->reg_write;
+ 
+@@ -823,22 +828,16 @@ struct nvmem_device *nvmem_register(const struct 
nvmem_config *config)
+       nvmem->dev.groups = nvmem_dev_groups;
+ #endif
+ 
+-      dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+-
+-      rval = device_register(&nvmem->dev);
+-      if (rval)
+-              goto err_put_device;
+-
+       if (nvmem->nkeepout) {
+               rval = nvmem_validate_keepouts(nvmem);
+               if (rval)
+-                      goto err_device_del;
++                      goto err_put_device;
+       }
+ 
+       if (config->compat) {
+               rval = nvmem_sysfs_setup_compat(nvmem, config);
+               if (rval)
+-                      goto err_device_del;
++                      goto err_put_device;
+       }
+ 
+       if (config->cells) {
+@@ -855,6 +854,12 @@ struct nvmem_device *nvmem_register(const struct 
nvmem_config *config)
+       if (rval)
+               goto err_remove_cells;
+ 
++      dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
++
++      rval = device_add(&nvmem->dev);
++      if (rval)
++              goto err_remove_cells;
++
+       blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
+ 
+       return nvmem;
+@@ -863,8 +868,6 @@ err_remove_cells:
+       nvmem_device_remove_all_cells(nvmem);
+       if (config->compat)
+               nvmem_sysfs_remove_compat(nvmem, config);
+-err_device_del:
+-      device_del(&nvmem->dev);
+ err_put_device:
+       put_device(&nvmem->dev);
+ 
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 9da8835ba5a58..9e949ddcb1464 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -47,9 +47,10 @@ static int __init 
early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+               err = memblock_mark_nomap(base, size);
+               if (err)
+                       memblock_free(base, size);
+-              kmemleak_ignore_phys(base);
+       }
+ 
++      kmemleak_ignore_phys(base);
++
+       return err;
+ }
+ 
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index f1ff003bb14bc..cd8146dbdd453 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -171,6 +171,7 @@ config ACER_WMI
+ config AMD_PMC
+       tristate "AMD SoC PMC driver"
+       depends on ACPI && PCI
++      select SERIO
+       help
+         The driver provides support for AMD Power Management Controller
+         primarily responsible for S2Idle transactions that are driven from
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index d3efd514614c0..83fea28bbb4f7 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -20,6 +20,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
++#include <linux/serio.h>
+ #include <linux/suspend.h>
+ #include <linux/seq_file.h>
+ #include <linux/uaccess.h>
+@@ -29,6 +30,10 @@
+ #define AMD_PMC_REGISTER_RESPONSE     0x980
+ #define AMD_PMC_REGISTER_ARGUMENT     0x9BC
+ 
++/* PMC Scratch Registers */
++#define AMD_PMC_SCRATCH_REG_CZN               0x94
++#define AMD_PMC_SCRATCH_REG_YC                0xD14
++
+ /* Base address of SMU for mapping physical address to virtual address */
+ #define AMD_PMC_SMU_INDEX_ADDRESS     0xB8
+ #define AMD_PMC_SMU_INDEX_DATA                0xBC
+@@ -110,6 +115,11 @@ struct amd_pmc_dev {
+       u32 base_addr;
+       u32 cpu_id;
+       u32 active_ips;
++/* SMU version information */
++      u8 smu_program;
++      u8 major;
++      u8 minor;
++      u8 rev;
+       struct device *dev;
+       struct mutex lock; /* generic mutex lock */
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+@@ -147,6 +157,51 @@ struct smu_metrics {
+       u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+ } __packed;
+ 
++static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
++{
++      int rc;
++      u32 val;
++
++      rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
++      if (rc)
++              return rc;
++
++      dev->smu_program = (val >> 24) & GENMASK(7, 0);
++      dev->major = (val >> 16) & GENMASK(7, 0);
++      dev->minor = (val >> 8) & GENMASK(7, 0);
++      dev->rev = (val >> 0) & GENMASK(7, 0);
++
++      dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
++              dev->smu_program, dev->major, dev->minor, dev->rev);
++
++      return 0;
++}
++
++static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
++                               struct seq_file *s)
++{
++      u32 val;
++
++      switch (pdev->cpu_id) {
++      case AMD_CPU_ID_CZN:
++              val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
++              break;
++      case AMD_CPU_ID_YC:
++              val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (dev)
++              dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
++
++      if (s)
++              seq_printf(s, "SMU idlemask : 0x%x\n", val);
++
++      return 0;
++}
++
+ #ifdef CONFIG_DEBUG_FS
+ static int smu_fw_info_show(struct seq_file *s, void *unused)
+ {
+@@ -201,6 +256,23 @@ static int s0ix_stats_show(struct seq_file *s, void 
*unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+ 
++static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
++{
++      struct amd_pmc_dev *dev = s->private;
++      int rc;
++
++      if (dev->major > 56 || (dev->major >= 55 && dev->minor >= 37)) {
++              rc = amd_pmc_idlemask_read(dev, NULL, s);
++              if (rc)
++                      return rc;
++      } else {
++              seq_puts(s, "Unsupported SMU version for Idlemask\n");
++      }
++
++      return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
++
+ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+ {
+       debugfs_remove_recursive(dev->dbgfs_dir);
+@@ -213,6 +285,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+                           &smu_fw_info_fops);
+       debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+                           &s0ix_stats_fops);
++      debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
++                          &amd_pmc_idlemask_fops);
+ }
+ #else
+ static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
+@@ -339,16 +413,54 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+       return -EINVAL;
+ }
+ 
++static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
++{
++      struct device *d;
++      int rc;
++
++      if (!pdev->major) {
++              rc = amd_pmc_get_smu_version(pdev);
++              if (rc)
++                      return rc;
++      }
++
++      if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
++              return 0;
++
++      d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
++      if (!d)
++              return 0;
++      if (device_may_wakeup(d)) {
++              dev_info_once(d, "Disabling IRQ1 wakeup source to avoid 
platform firmware bug\n");
++              disable_irq_wake(1);
++              device_set_wakeup_enable(d, false);
++      }
++      put_device(d);
++
++      return 0;
++}
++
+ static int __maybe_unused amd_pmc_suspend(struct device *dev)
+ {
+       struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+       int rc;
+       u8 msg;
+ 
++      if (pdev->cpu_id == AMD_CPU_ID_CZN) {
++              int rc = amd_pmc_czn_wa_irq1(pdev);
++
++              if (rc) {
++                      dev_err(pdev->dev, "failed to adjust keyboard wakeup: 
%d\n", rc);
++                      return rc;
++              }
++      }
++
+       /* Reset and Start SMU logging - to monitor the s0i3 stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
+ 
++      /* Dump the IdleMask before we send hint to SMU */
++      amd_pmc_idlemask_read(pdev, dev, NULL);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
+       if (rc)
+@@ -371,6 +483,9 @@ static int __maybe_unused amd_pmc_resume(struct device 
*dev)
+       if (rc)
+               dev_err(pdev->dev, "resume failed\n");
+ 
++      /* Dump the IdleMask to see the blockers */
++      amd_pmc_idlemask_read(pdev, dev, NULL);
++
+       return 0;
+ }
+ 
+@@ -458,6 +573,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
+       if (err)
+               dev_err(dev->dev, "SMU debugging info not supported on this 
platform\n");
+ 
++      amd_pmc_get_smu_version(dev);
+       platform_set_drvdata(pdev, dev);
+       amd_pmc_dbgfs_register(dev);
+       return 0;
+diff --git a/drivers/platform/x86/touchscreen_dmi.c 
b/drivers/platform/x86/touchscreen_dmi.c
+index 93671037fd598..69ba2c5182610 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -1073,6 +1073,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+                       DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+               },
+       },
++      {
++              /* Chuwi Vi8 (CWI501) */
++              .driver_data = (void *)&chuwi_vi8_data,
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
++                      DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
++              },
++      },
+       {
+               /* Chuwi Vi8 (CWI506) */
+               .driver_data = (void *)&chuwi_vi8_data,
+diff --git a/fs/aio.c b/fs/aio.c
+index 1a78979663dca..e88fd9b58f3f1 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -334,6 +334,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
+       spin_lock(&mm->ioctx_lock);
+       rcu_read_lock();
+       table = rcu_dereference(mm->ioctx_table);
++      if (!table)
++              goto out_unlock;
++
+       for (i = 0; i < table->nr; i++) {
+               struct kioctx *ctx;
+ 
+@@ -347,6 +350,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
+               }
+       }
+ 
++out_unlock:
+       rcu_read_unlock();
+       spin_unlock(&mm->ioctx_lock);
+       return res;
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index 640ac8fe891e6..a8509f364bf5c 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -1114,7 +1114,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode 
*inode, void __user *argp)
+ 
+       minseg = range[0] + segbytes - 1;
+       do_div(minseg, segbytes);
++
++      if (range[1] < 4096)
++              goto out;
++
+       maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
++      if (maxseg < segbytes)
++              goto out;
++
+       do_div(maxseg, segbytes);
+       maxseg--;
+ 
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 663e68c22db88..bc96900262254 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
+       if (newsize > devsize)
+               goto out;
+ 
++      /*
++       * Prevent underflow in second superblock position calculation.
++       * The exact minimum size check is done in nilfs_sufile_resize().
++       */
++      if (newsize < 4096) {
++              ret = -ENOSPC;
++              goto out;
++      }
++
+       /*
+        * Write lock is required to protect some functions depending
+        * on the number of segments, the number of reserved segments,
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 6aa6cef0757f1..1068ff40077ca 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
+ {
+       struct nilfs_super_block **sbp = nilfs->ns_sbp;
+       struct buffer_head **sbh = nilfs->ns_sbh;
+-      u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
++      u64 sb2off, devsize = nilfs->ns_bdev->bd_inode->i_size;
+       int valid[2], swp = 0;
+ 
++      if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
++              nilfs_err(sb, "device size too small");
++              return -EINVAL;
++      }
++      sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
++
+       sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
+                                       &sbh[0]);
+       sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
+diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
+index b88d19e9581e9..c8469c656e0dc 100644
+--- a/fs/squashfs/xattr_id.c
++++ b/fs/squashfs/xattr_id.c
+@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, 
u64 table_start,
+       /* Sanity check values */
+ 
+       /* there is always at least one xattr id */
+-      if (*xattr_ids <= 0)
++      if (*xattr_ids == 0)
+               return ERR_PTR(-EINVAL);
+ 
+       len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
+index 005abfd9fd347..aff6fb5281f63 100644
+--- a/fs/xfs/libxfs/xfs_ag.c
++++ b/fs/xfs/libxfs/xfs_ag.c
+@@ -173,7 +173,6 @@ __xfs_free_perag(
+       struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+ 
+       ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
+-      ASSERT(atomic_read(&pag->pag_ref) == 0);
+       kmem_free(pag);
+ }
+ 
+@@ -192,7 +191,7 @@ xfs_free_perag(
+               pag = radix_tree_delete(&mp->m_perag_tree, agno);
+               spin_unlock(&mp->m_perag_lock);
+               ASSERT(pag);
+-              ASSERT(atomic_read(&pag->pag_ref) == 0);
++              XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
+ 
+               cancel_delayed_work_sync(&pag->pag_blockgc_work);
+               xfs_iunlink_destroy(pag);
+diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
+index 2983954817135..dffe4ca584935 100644
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -51,6 +51,71 @@ xfs_btree_magic(
+       return magic;
+ }
+ 
++/*
++ * These sibling pointer checks are optimised for null sibling pointers. This
++ * happens a lot, and we don't need to byte swap at runtime if the sibling
++ * pointer is NULL.
++ *
++ * These are explicitly marked at inline because the cost of calling them as
++ * functions instead of inlining them is about 36 bytes extra code per call 
site
++ * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
++ * two sibling check functions reduces the compiled code size by over 300
++ * bytes.
++ */
++static inline xfs_failaddr_t
++xfs_btree_check_lblock_siblings(
++      struct xfs_mount        *mp,
++      struct xfs_btree_cur    *cur,
++      int                     level,
++      xfs_fsblock_t           fsb,
++      __be64                  dsibling)
++{
++      xfs_fsblock_t           sibling;
++
++      if (dsibling == cpu_to_be64(NULLFSBLOCK))
++              return NULL;
++
++      sibling = be64_to_cpu(dsibling);
++      if (sibling == fsb)
++              return __this_address;
++      if (level >= 0) {
++              if (!xfs_btree_check_lptr(cur, sibling, level + 1))
++                      return __this_address;
++      } else {
++              if (!xfs_verify_fsbno(mp, sibling))
++                      return __this_address;
++      }
++
++      return NULL;
++}
++
++static inline xfs_failaddr_t
++xfs_btree_check_sblock_siblings(
++      struct xfs_mount        *mp,
++      struct xfs_btree_cur    *cur,
++      int                     level,
++      xfs_agnumber_t          agno,
++      xfs_agblock_t           agbno,
++      __be32                  dsibling)
++{
++      xfs_agblock_t           sibling;
++
++      if (dsibling == cpu_to_be32(NULLAGBLOCK))
++              return NULL;
++
++      sibling = be32_to_cpu(dsibling);
++      if (sibling == agbno)
++              return __this_address;
++      if (level >= 0) {
++              if (!xfs_btree_check_sptr(cur, sibling, level + 1))
++                      return __this_address;
++      } else {
++              if (!xfs_verify_agbno(mp, agno, sibling))
++                      return __this_address;
++      }
++      return NULL;
++}
++
+ /*
+  * Check a long btree block header.  Return the address of the failing check,
+  * or NULL if everything is ok.
+@@ -65,6 +130,8 @@ __xfs_btree_check_lblock(
+       struct xfs_mount        *mp = cur->bc_mp;
+       xfs_btnum_t             btnum = cur->bc_btnum;
+       int                     crc = xfs_has_crc(mp);
++      xfs_failaddr_t          fa;
++      xfs_fsblock_t           fsb = NULLFSBLOCK;
+ 
+       if (crc) {
+               if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
+@@ -83,16 +150,16 @@ __xfs_btree_check_lblock(
+       if (be16_to_cpu(block->bb_numrecs) >
+           cur->bc_ops->get_maxrecs(cur, level))
+               return __this_address;
+-      if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
+-          !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
+-                      level + 1))
+-              return __this_address;
+-      if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
+-          !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
+-                      level + 1))
+-              return __this_address;
+ 
+-      return NULL;
++      if (bp)
++              fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
++
++      fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
++                      block->bb_u.l.bb_leftsib);
++      if (!fa)
++              fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
++                              block->bb_u.l.bb_rightsib);
++      return fa;
+ }
+ 
+ /* Check a long btree block header. */
+@@ -130,6 +197,9 @@ __xfs_btree_check_sblock(
+       struct xfs_mount        *mp = cur->bc_mp;
+       xfs_btnum_t             btnum = cur->bc_btnum;
+       int                     crc = xfs_has_crc(mp);
++      xfs_failaddr_t          fa;
++      xfs_agblock_t           agbno = NULLAGBLOCK;
++      xfs_agnumber_t          agno = NULLAGNUMBER;
+ 
+       if (crc) {
+               if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
+@@ -146,16 +216,18 @@ __xfs_btree_check_sblock(
+       if (be16_to_cpu(block->bb_numrecs) >
+           cur->bc_ops->get_maxrecs(cur, level))
+               return __this_address;
+-      if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
+-          !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
+-                      level + 1))
+-              return __this_address;
+-      if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
+-          !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
+-                      level + 1))
+-              return __this_address;
+ 
+-      return NULL;
++      if (bp) {
++              agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
++              agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
++      }
++
++      fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
++                      block->bb_u.s.bb_leftsib);
++      if (!fa)
++              fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
++                               agbno, block->bb_u.s.bb_rightsib);
++      return fa;
+ }
+ 
+ /* Check a short btree block header. */
+@@ -373,8 +445,14 @@ xfs_btree_del_cursor(
+                       break;
+       }
+ 
++      /*
++       * If we are doing a BMBT update, the number of unaccounted blocks
++       * allocated during this cursor life time should be zero. If it's not
++       * zero, then we should be shut down or on our way to shutdown due to
++       * cancelling a dirty transaction on error.
++       */
+       ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
+-             xfs_is_shutdown(cur->bc_mp));
++             xfs_is_shutdown(cur->bc_mp) || error != 0);
+       if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
+               kmem_free(cur->bc_ops);
+       if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
+@@ -3188,7 +3266,7 @@ xfs_btree_insrec(
+       struct xfs_btree_block  *block; /* btree block */
+       struct xfs_buf          *bp;    /* buffer for block */
+       union xfs_btree_ptr     nptr;   /* new block ptr */
+-      struct xfs_btree_cur    *ncur;  /* new btree cursor */
++      struct xfs_btree_cur    *ncur = NULL;   /* new btree cursor */
+       union xfs_btree_key     nkey;   /* new block key */
+       union xfs_btree_key     *lkey;
+       int                     optr;   /* old key/record index */
+@@ -3268,7 +3346,7 @@ xfs_btree_insrec(
+ #ifdef DEBUG
+       error = xfs_btree_check_block(cur, block, level, bp);
+       if (error)
+-              return error;
++              goto error0;
+ #endif
+ 
+       /*
+@@ -3288,7 +3366,7 @@ xfs_btree_insrec(
+               for (i = numrecs - ptr; i >= 0; i--) {
+                       error = xfs_btree_debug_check_ptr(cur, pp, i, level);
+                       if (error)
+-                              return error;
++                              goto error0;
+               }
+ 
+               xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
+@@ -3373,6 +3451,8 @@ xfs_btree_insrec(
+       return 0;
+ 
+ error0:
++      if (ncur)
++              xfs_btree_del_cursor(ncur, error);
+       return error;
+ }
+ 
+@@ -4265,6 +4345,21 @@ xfs_btree_visit_block(
+       if (xfs_btree_ptr_is_null(cur, &rptr))
+               return -ENOENT;
+ 
++      /*
++       * We only visit blocks once in this walk, so we have to avoid the
++       * internal xfs_btree_lookup_get_block() optimisation where it will
++       * return the same block without checking if the right sibling points
++       * back to us and creates a cyclic reference in the btree.
++       */
++      if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
++              if (be64_to_cpu(rptr.l) == XFS_DADDR_TO_FSB(cur->bc_mp,
++                                                      xfs_buf_daddr(bp)))
++                      return -EFSCORRUPTED;
++      } else {
++              if (be32_to_cpu(rptr.s) == xfs_daddr_to_agbno(cur->bc_mp,
++                                                      xfs_buf_daddr(bp)))
++                      return -EFSCORRUPTED;
++      }
+       return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+ }
+ 
+@@ -4439,20 +4534,21 @@ xfs_btree_lblock_verify(
+ {
+       struct xfs_mount        *mp = bp->b_mount;
+       struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
++      xfs_fsblock_t           fsb;
++      xfs_failaddr_t          fa;
+ 
+       /* numrecs verification */
+       if (be16_to_cpu(block->bb_numrecs) > max_recs)
+               return __this_address;
+ 
+       /* sibling pointer verification */
+-      if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
+-          !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
+-              return __this_address;
+-      if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
+-          !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
+-              return __this_address;
+-
+-      return NULL;
++      fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
++      fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
++                      block->bb_u.l.bb_leftsib);
++      if (!fa)
++              fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
++                              block->bb_u.l.bb_rightsib);
++      return fa;
+ }
+ 
+ /**
+@@ -4493,7 +4589,9 @@ xfs_btree_sblock_verify(
+ {
+       struct xfs_mount        *mp = bp->b_mount;
+       struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
+-      xfs_agblock_t           agno;
++      xfs_agnumber_t          agno;
++      xfs_agblock_t           agbno;
++      xfs_failaddr_t          fa;
+ 
+       /* numrecs verification */
+       if (be16_to_cpu(block->bb_numrecs) > max_recs)
+@@ -4501,14 +4599,13 @@ xfs_btree_sblock_verify(
+ 
+       /* sibling pointer verification */
+       agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
+-      if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
+-          !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
+-              return __this_address;
+-      if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
+-          !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
+-              return __this_address;
+-
+-      return NULL;
++      agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
++      fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
++                      block->bb_u.s.bb_leftsib);
++      if (!fa)
++              fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
++                              block->bb_u.s.bb_rightsib);
++      return fa;
+ }
+ 
+ /*
+diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
+index 1d174909f9bdf..20095233d7bc0 100644
+--- a/fs/xfs/libxfs/xfs_inode_fork.c
++++ b/fs/xfs/libxfs/xfs_inode_fork.c
+@@ -50,8 +50,13 @@ xfs_init_local_fork(
+               mem_size++;
+ 
+       if (size) {
++              /*
++               * As we round up the allocation here, we need to ensure the
++               * bytes we don't copy data into are zeroed because the log
++               * vectors still copy them into the journal.
++               */
+               real_size = roundup(mem_size, 4);
+-              ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS);
++              ifp->if_u1.if_data = kmem_zalloc(real_size, KM_NOFS);
+               memcpy(ifp->if_u1.if_data, data, size);
+               if (zero_terminate)
+                       ifp->if_u1.if_data[size] = '\0';
+@@ -500,10 +505,11 @@ xfs_idata_realloc(
+       /*
+        * For inline data, the underlying buffer must be a multiple of 4 bytes
+        * in size so that it can be logged and stay on word boundaries.
+-       * We enforce that here.
++       * We enforce that here, and use __GFP_ZERO to ensure that size
++       * extensions always zero the unused roundup area.
+        */
+       ifp->if_u1.if_data = krealloc(ifp->if_u1.if_data, roundup(new_size, 4),
+-                                    GFP_NOFS | __GFP_NOFAIL);
++                                    GFP_NOFS | __GFP_NOFAIL | __GFP_ZERO);
+       ifp->if_bytes = new_size;
+ }
+ 
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index e58349be78bd5..04e2a57313fa0 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -30,6 +30,47 @@
+  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
+  */
+ 
++/*
++ * Check that all the V4 feature bits that the V5 filesystem format requires 
are
++ * correctly set.
++ */
++static bool
++xfs_sb_validate_v5_features(
++      struct xfs_sb   *sbp)
++{
++      /* We must not have any unknown V4 feature bits set */
++      if (sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS)
++              return false;
++
++      /*
++       * The CRC bit is considered an invalid V4 flag, so we have to add it
++       * manually to the OKBITS mask.
++       */
++      if (sbp->sb_features2 & ~(XFS_SB_VERSION2_OKBITS |
++                                XFS_SB_VERSION2_CRCBIT))
++              return false;
++
++      /* Now check all the required V4 feature flags are set. */
++
++#define V5_VERS_FLAGS (XFS_SB_VERSION_NLINKBIT        | \
++                      XFS_SB_VERSION_ALIGNBIT         | \
++                      XFS_SB_VERSION_LOGV2BIT         | \
++                      XFS_SB_VERSION_EXTFLGBIT        | \
++                      XFS_SB_VERSION_DIRV2BIT         | \
++                      XFS_SB_VERSION_MOREBITSBIT)
++
++#define V5_FEAT_FLAGS (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
++                      XFS_SB_VERSION2_ATTR2BIT        | \
++                      XFS_SB_VERSION2_PROJID32BIT     | \
++                      XFS_SB_VERSION2_CRCBIT)
++
++      if ((sbp->sb_versionnum & V5_VERS_FLAGS) != V5_VERS_FLAGS)
++              return false;
++      if ((sbp->sb_features2 & V5_FEAT_FLAGS) != V5_FEAT_FLAGS)
++              return false;
++      return true;
++}
++
+ /*
+  * We support all XFS versions newer than a v4 superblock with V2 directories.
+  */
+@@ -37,9 +78,19 @@ bool
+ xfs_sb_good_version(
+       struct xfs_sb   *sbp)
+ {
+-      /* all v5 filesystems are supported */
++      /*
++       * All v5 filesystems are supported, but we must check that all the
++       * required v4 feature flags are enabled correctly as the code checks
++       * those flags and not for v5 support.
++       */
+       if (xfs_sb_is_v5(sbp))
+-              return true;
++              return xfs_sb_validate_v5_features(sbp);
++
++      /* We must not have any unknown v4 feature bits set */
++      if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
++          ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
++           (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
++              return false;
+ 
+       /* versions prior to v4 are not supported */
+       if (XFS_SB_VERSION_NUM(sbp) < XFS_SB_VERSION_4)
+@@ -51,12 +102,6 @@ xfs_sb_good_version(
+       if (!(sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT))
+               return false;
+ 
+-      /* And must not have any unknown v4 feature bits set */
+-      if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
+-          ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
+-           (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
+-              return false;
+-
+       /* It's a supported v4 filesystem */
+       return true;
+ }
+@@ -70,6 +115,8 @@ xfs_sb_version_to_features(
+       /* optional V4 features */
+       if (sbp->sb_rblocks > 0)
+               features |= XFS_FEAT_REALTIME;
++      if (sbp->sb_versionnum & XFS_SB_VERSION_NLINKBIT)
++              features |= XFS_FEAT_NLINK;
+       if (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT)
+               features |= XFS_FEAT_ATTR;
+       if (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT)
+@@ -262,12 +309,15 @@ xfs_validate_sb_common(
+       bool                    has_dalign;
+ 
+       if (!xfs_verify_magic(bp, dsb->sb_magicnum)) {
+-              xfs_warn(mp, "bad magic number");
++              xfs_warn(mp,
++"Superblock has bad magic number 0x%x. Not an XFS filesystem?",
++                      be32_to_cpu(dsb->sb_magicnum));
+               return -EWRONGFS;
+       }
+ 
+       if (!xfs_sb_good_version(sbp)) {
+-              xfs_warn(mp, "bad version");
++              xfs_warn(mp,
++"Superblock has unknown features enabled or corrupted feature masks.");
+               return -EWRONGFS;
+       }
+ 
+diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
+index 03159970133ff..51ffdec5e4faa 100644
+--- a/fs/xfs/xfs_bmap_item.c
++++ b/fs/xfs/xfs_bmap_item.c
+@@ -39,6 +39,7 @@ STATIC void
+ xfs_bui_item_free(
+       struct xfs_bui_log_item *buip)
+ {
++      kmem_free(buip->bui_item.li_lv_shadow);
+       kmem_cache_free(xfs_bui_zone, buip);
+ }
+ 
+@@ -198,6 +199,7 @@ xfs_bud_item_release(
+       struct xfs_bud_log_item *budp = BUD_ITEM(lip);
+ 
+       xfs_bui_release(budp->bud_buip);
++      kmem_free(budp->bud_item.li_lv_shadow);
+       kmem_cache_free(xfs_bud_zone, budp);
+ }
+ 
+diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
+index 017904a34c023..c265ae20946d5 100644
+--- a/fs/xfs/xfs_icreate_item.c
++++ b/fs/xfs/xfs_icreate_item.c
+@@ -63,6 +63,7 @@ STATIC void
+ xfs_icreate_item_release(
+       struct xfs_log_item     *lip)
+ {
++      kmem_free(ICR_ITEM(lip)->ic_item.li_lv_shadow);
+       kmem_cache_free(xfs_icreate_zone, ICR_ITEM(lip));
+ }
+ 
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 5608066d6e539..623244650a2f0 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -1317,8 +1317,15 @@ xfs_qm_quotacheck(
+ 
+       error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
+                       NULL);
+-      if (error)
++      if (error) {
++              /*
++               * The inode walk may have partially populated the dquot
++               * caches.  We must purge them before disabling quota and
++               * tearing down the quotainfo, or else the dquots will leak.
++               */
++              xfs_qm_dqpurge_all(mp);
+               goto error_return;
++      }
+ 
+       /*
+        * We've made all the changes that we need to make incore.  Flush them
+diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
+index 46904b793bd48..8ef842d17916a 100644
+--- a/fs/xfs/xfs_refcount_item.c
++++ b/fs/xfs/xfs_refcount_item.c
+@@ -35,6 +35,7 @@ STATIC void
+ xfs_cui_item_free(
+       struct xfs_cui_log_item *cuip)
+ {
++      kmem_free(cuip->cui_item.li_lv_shadow);
+       if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
+               kmem_free(cuip);
+       else
+@@ -204,6 +205,7 @@ xfs_cud_item_release(
+       struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
+ 
+       xfs_cui_release(cudp->cud_cuip);
++      kmem_free(cudp->cud_item.li_lv_shadow);
+       kmem_cache_free(xfs_cud_zone, cudp);
+ }
+ 
+diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
+index 5f06959804678..15e7b01740a77 100644
+--- a/fs/xfs/xfs_rmap_item.c
++++ b/fs/xfs/xfs_rmap_item.c
+@@ -35,6 +35,7 @@ STATIC void
+ xfs_rui_item_free(
+       struct xfs_rui_log_item *ruip)
+ {
++      kmem_free(ruip->rui_item.li_lv_shadow);
+       if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
+               kmem_free(ruip);
+       else
+@@ -227,6 +228,7 @@ xfs_rud_item_release(
+       struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+ 
+       xfs_rui_release(rudp->rud_ruip);
++      kmem_free(rudp->rud_item.li_lv_shadow);
+       kmem_cache_free(xfs_rud_zone, rudp);
+ }
+ 
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 6224b1e32681c..2d7df5cea2494 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1005,7 +1005,15 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 
sleep_state,
+ 
+ acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
+                                          u32 val_a, u32 val_b);
+-
++#ifdef CONFIG_X86
++struct acpi_s2idle_dev_ops {
++      struct list_head list_node;
++      void (*prepare)(void);
++      void (*restore)(void);
++};
++int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
++void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
++#endif /* CONFIG_X86 */
+ #ifndef CONFIG_IA64
+ void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
+ #else
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index f98d747f983b9..4ede8df5818e1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -684,7 +684,10 @@ static inline struct hstate *hstate_sizelog(int 
page_size_log)
+       if (!page_size_log)
+               return &default_hstate;
+ 
+-      return size_to_hstate(1UL << page_size_log);
++      if (page_size_log < BITS_PER_LONG)
++              return size_to_hstate(1UL << page_size_log);
++
++      return NULL;
+ }
+ 
+ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index 48d015ed21752..cc338c6c74954 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -251,6 +251,7 @@ struct plat_stmmacenet_data {
+       int rss_en;
+       int mac_port_sel_speed;
+       bool en_tx_lpi_clockgating;
++      bool rx_clk_runs_in_lpi;
+       int has_xgmac;
+       bool vlan_fail_q_en;
+       u8 vlan_fail_q;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 3e9db5146765f..cd6f2ae28ecf2 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2315,6 +2315,19 @@ static inline __must_check bool 
skb_set_owner_sk_safe(struct sk_buff *skb, struc
+       return false;
+ }
+ 
++static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, 
struct sock *sk)
++{
++      skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
++      if (skb) {
++              if (sk_rmem_schedule(sk, skb, skb->truesize)) {
++                      skb_set_owner_r(skb, sk);
++                      return skb;
++              }
++              __kfree_skb(skb);
++      }
++      return NULL;
++}
++
+ static inline void skb_prepare_for_gro(struct sk_buff *skb)
+ {
+       if (skb->destructor != sock_wfree) {
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 23af2f8e8563e..8818f3a89fef3 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -18,6 +18,9 @@
+  *            <[email protected]> and Prasanna S Panchamukhi
+  *            <[email protected]> added function-return probes.
+  */
++
++#define pr_fmt(fmt) "kprobes: " fmt
++
+ #include <linux/kprobes.h>
+ #include <linux/hash.h>
+ #include <linux/init.h>
+@@ -892,7 +895,7 @@ static void optimize_all_kprobes(void)
+                               optimize_kprobe(p);
+       }
+       cpus_read_unlock();
+-      printk(KERN_INFO "Kprobes globally optimized\n");
++      pr_info("kprobe jump-optimization is enabled. All kprobes are optimized 
if possible.\n");
+ out:
+       mutex_unlock(&kprobe_mutex);
+ }
+@@ -925,7 +928,7 @@ static void unoptimize_all_kprobes(void)
+ 
+       /* Wait for unoptimizing completion */
+       wait_for_kprobe_optimizer();
+-      printk(KERN_INFO "Kprobes globally unoptimized\n");
++      pr_info("kprobe jump-optimization is disabled. All kprobes are based on 
software breakpoint.\n");
+ }
+ 
+ static DEFINE_MUTEX(kprobe_sysctl_mutex);
+@@ -1003,7 +1006,7 @@ static int reuse_unused_kprobe(struct kprobe *ap)
+        * unregistered.
+        * Thus there should be no chance to reuse unused kprobe.
+        */
+-      printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
++      WARN_ON_ONCE(1);
+       return -EINVAL;
+ }
+ 
+@@ -1049,18 +1052,13 @@ static int __arm_kprobe_ftrace(struct kprobe *p, 
struct ftrace_ops *ops,
+       int ret = 0;
+ 
+       ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
+-      if (ret) {
+-              pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
+-                       p->addr, ret);
++      if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error 
%d)\n", p->addr, ret))
+               return ret;
+-      }
+ 
+       if (*cnt == 0) {
+               ret = register_ftrace_function(ops);
+-              if (ret) {
+-                      pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
++              if (WARN(ret < 0, "Failed to register kprobe-ftrace (error 
%d)\n", ret))
+                       goto err_ftrace;
+-              }
+       }
+ 
+       (*cnt)++;
+@@ -1092,14 +1090,14 @@ static int __disarm_kprobe_ftrace(struct kprobe *p, 
struct ftrace_ops *ops,
+ 
+       if (*cnt == 1) {
+               ret = unregister_ftrace_function(ops);
+-              if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", 
ret))
++              if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error 
%d)\n", ret))
+                       return ret;
+       }
+ 
+       (*cnt)--;
+ 
+       ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
+-      WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
++      WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
+                 p->addr, ret);
+       return ret;
+ }
+@@ -1894,7 +1892,7 @@ unsigned long __kretprobe_trampoline_handler(struct 
pt_regs *regs,
+ 
+               node = node->next;
+       }
+-      pr_err("Oops! Kretprobe fails to find correct return address.\n");
++      pr_err("kretprobe: Return address not found, not execute handler. Maybe 
there is a bug in the kernel.\n");
+       BUG_ON(1);
+ 
+ found:
+@@ -2229,8 +2227,7 @@ EXPORT_SYMBOL_GPL(enable_kprobe);
+ /* Caller must NOT call this in usual path. This is only for critical case */
+ void dump_kprobe(struct kprobe *kp)
+ {
+-      pr_err("Dumping kprobe:\n");
+-      pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
++      pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
+              kp->symbol_name, kp->offset, kp->addr);
+ }
+ NOKPROBE_SYMBOL(dump_kprobe);
+@@ -2493,8 +2490,7 @@ static int __init init_kprobes(void)
+       err = populate_kprobe_blacklist(__start_kprobe_blacklist,
+                                       __stop_kprobe_blacklist);
+       if (err) {
+-              pr_err("kprobes: failed to populate blacklist: %d\n", err);
+-              pr_err("Please take care of using kprobes.\n");
++              pr_err("Failed to populate blacklist (error %d), kprobes not 
restricted, be careful using them!\n", err);
+       }
+ 
+       if (kretprobe_blacklist_size) {
+@@ -2503,7 +2499,7 @@ static int __init init_kprobes(void)
+                       kretprobe_blacklist[i].addr =
+                               kprobe_lookup_name(kretprobe_blacklist[i].name, 
0);
+                       if (!kretprobe_blacklist[i].addr)
+-                              printk("kretprobe: lookup failed: %s\n",
++                              pr_err("Failed to lookup symbol '%s' for 
kretprobe blacklist. Maybe the target function is removed or renamed.\n",
+                                      kretprobe_blacklist[i].name);
+               }
+       }
+@@ -2707,7 +2703,7 @@ static int arm_all_kprobes(void)
+       }
+ 
+       if (errors)
+-              pr_warn("Kprobes globally enabled, but failed to arm %d out of 
%d probes\n",
++              pr_warn("Kprobes globally enabled, but failed to enable %d out 
of %d probes. Please check which kprobes are kept disabled via debugfs.\n",
+                       errors, total);
+       else
+               pr_info("Kprobes globally enabled\n");
+@@ -2750,7 +2746,7 @@ static int disarm_all_kprobes(void)
+       }
+ 
+       if (errors)
+-              pr_warn("Kprobes globally disabled, but failed to disarm %d out 
of %d probes\n",
++              pr_warn("Kprobes globally disabled, but failed to disable %d 
out of %d probes. Please check which kprobes are kept enabled via debugfs.\n",
+                       errors, total);
+       else
+               pr_info("Kprobes globally disabled\n");
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index cad2a1b34ed04..fa88bf6ccce02 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -1169,10 +1169,11 @@ void psi_trigger_destroy(struct psi_trigger *t)
+ 
+       group = t->group;
+       /*
+-       * Wakeup waiters to stop polling. Can happen if cgroup is deleted
+-       * from under a polling process.
++       * Wakeup waiters to stop polling and clear the queue to prevent it from
++       * being accessed later. Can happen if cgroup is deleted from under a
++       * polling process.
+        */
+-      wake_up_interruptible(&t->event_wait);
++      wake_up_pollfree(&t->event_wait);
+ 
+       mutex_lock(&group->trigger_lock);
+ 
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 5897828b9d7ed..7e5dff602585d 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -470,11 +470,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, 
ktime_t interval)
+ }
+ EXPORT_SYMBOL_GPL(alarm_forward);
+ 
+-u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
++static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool 
throttle)
+ {
+       struct alarm_base *base = &alarm_bases[alarm->type];
++      ktime_t now = base->get_ktime();
++
++      if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
++              /*
++               * Same issue as with posix_timer_fn(). Timers which are
++               * periodic but the signal is ignored can starve the system
++               * with a very small interval. The real fix which was
++               * promised in the context of posix_timer_fn() never
++               * materialized, but someone should really work on it.
++               *
++               * To prevent DOS fake @now to be 1 jiffie out which keeps
++               * the overrun accounting correct but creates an
++               * inconsistency vs. timer_gettime(2).
++               */
++              ktime_t kj = NSEC_PER_SEC / HZ;
++
++              if (interval < kj)
++                      now = ktime_add(now, kj);
++      }
++
++      return alarm_forward(alarm, now, interval);
++}
+ 
+-      return alarm_forward(alarm, base->get_ktime(), interval);
++u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
++{
++      return __alarm_forward_now(alarm, interval, false);
+ }
+ EXPORT_SYMBOL_GPL(alarm_forward_now);
+ 
+@@ -551,9 +575,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct 
alarm *alarm,
+       if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
+               /*
+                * Handle ignored signals and rearm the timer. This will go
+-               * away once we handle ignored signals proper.
++               * away once we handle ignored signals proper. Ensure that
++               * small intervals cannot starve the system.
+                */
+-              ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
++              ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, 
true);
+               ++ptr->it_requeue_pending;
+               ptr->it_active = 1;
+               result = ALARMTIMER_RESTART;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 30c9c2a63746a..81e28722edfaf 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2538,18 +2538,19 @@ static int filemap_get_pages(struct kiocb *iocb, 
struct iov_iter *iter,
+       struct page *page;
+       int err = 0;
+ 
++      /* "last_index" is the index of the page beyond the end of the read */
+       last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
+ retry:
+       if (fatal_signal_pending(current))
+               return -EINTR;
+ 
+-      filemap_get_read_batch(mapping, index, last_index, pvec);
++      filemap_get_read_batch(mapping, index, last_index - 1, pvec);
+       if (!pagevec_count(pvec)) {
+               if (iocb->ki_flags & IOCB_NOIO)
+                       return -EAGAIN;
+               page_cache_sync_readahead(mapping, ra, filp, index,
+                               last_index - index);
+-              filemap_get_read_batch(mapping, index, last_index, pvec);
++              filemap_get_read_batch(mapping, index, last_index - 1, pvec);
+       }
+       if (!pagevec_count(pvec)) {
+               if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 838d59a74c65e..2b7397781c99a 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1615,13 +1615,7 @@ void __init __memblock_free_late(phys_addr_t base, 
phys_addr_t size)
+       end = PFN_DOWN(base + size);
+ 
+       for (; cursor < end; cursor++) {
+-              /*
+-               * Reserved pages are always initialized by the end of
+-               * memblock_free_all() (by memmap_init() and, if deferred
+-               * initialization is enabled, memmap_init_reserved_pages()), so
+-               * these pages can be released directly to the buddy allocator.
+-               */
+-              __free_pages_core(pfn_to_page(cursor), 0);
++              memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+               totalram_pages_inc();
+       }
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 33d6b691e15ea..24a80e960d2d9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10646,7 +10646,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 
*stats64,
+ 
+       BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+       for (i = 0; i < n; i++)
+-              dst[i] = atomic_long_read(&src[i]);
++              dst[i] = (unsigned long)atomic_long_read(&src[i]);
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + n * sizeof(u64), 0,
+              sizeof(*stats64) - n * sizeof(u64));
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index ae6013a8bce53..86b4e8909ad1e 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -1514,15 +1514,16 @@ void sock_map_unhash(struct sock *sk)
+       psock = sk_psock(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+-              if (sk->sk_prot->unhash)
+-                      sk->sk_prot->unhash(sk);
+-              return;
++              saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
++      } else {
++              saved_unhash = psock->saved_unhash;
++              sock_map_remove_links(sk, psock);
++              rcu_read_unlock();
+       }
+-
+-      saved_unhash = psock->saved_unhash;
+-      sock_map_remove_links(sk, psock);
+-      rcu_read_unlock();
+-      saved_unhash(sk);
++      if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
++              return;
++      if (saved_unhash)
++              saved_unhash(sk);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_unhash);
+ 
+@@ -1535,17 +1536,18 @@ void sock_map_destroy(struct sock *sk)
+       psock = sk_psock_get(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+-              if (sk->sk_prot->destroy)
+-                      sk->sk_prot->destroy(sk);
+-              return;
++              saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
++      } else {
++              saved_destroy = psock->saved_destroy;
++              sock_map_remove_links(sk, psock);
++              rcu_read_unlock();
++              sk_psock_stop(psock);
++              sk_psock_put(sk, psock);
+       }
+-
+-      saved_destroy = psock->saved_destroy;
+-      sock_map_remove_links(sk, psock);
+-      rcu_read_unlock();
+-      sk_psock_stop(psock);
+-      sk_psock_put(sk, psock);
+-      saved_destroy(sk);
++      if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
++              return;
++      if (saved_destroy)
++              saved_destroy(sk);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_destroy);
+ 
+@@ -1560,16 +1562,21 @@ void sock_map_close(struct sock *sk, long timeout)
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+               release_sock(sk);
+-              return sk->sk_prot->close(sk, timeout);
++              saved_close = READ_ONCE(sk->sk_prot)->close;
++      } else {
++              saved_close = psock->saved_close;
++              sock_map_remove_links(sk, psock);
++              rcu_read_unlock();
++              sk_psock_stop(psock);
++              release_sock(sk);
++              cancel_work_sync(&psock->work);
++              sk_psock_put(sk, psock);
+       }
+-
+-      saved_close = psock->saved_close;
+-      sock_map_remove_links(sk, psock);
+-      rcu_read_unlock();
+-      sk_psock_stop(psock);
+-      release_sock(sk);
+-      cancel_work_sync(&psock->work);
+-      sk_psock_put(sk, psock);
++      /* Make sure we do not recurse. This is a bug.
++       * Leak the socket instead of crashing on a stack overflow.
++       */
++      if (WARN_ON_ONCE(saved_close == sock_map_close))
++              return;
+       saved_close(sk, timeout);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_close);
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 071620622e1e1..a28536ad765b1 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -551,11 +551,9 @@ static struct sock *dccp_v6_request_recv_sock(const 
struct sock *sk,
+       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
+       /* Clone pktoptions received with SYN, if we own the req */
+       if (*own_req && ireq->pktopts) {
+-              newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
++              newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, 
newsk);
+               consume_skb(ireq->pktopts);
+               ireq->pktopts = NULL;
+-              if (newnp->pktoptions)
+-                      skb_set_owner_r(newnp->pktoptions, newsk);
+       }
+ 
+       return newsk;
+@@ -615,7 +613,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff 
*skb)
+                                              --ANK (980728)
+        */
+       if (np->rxopt.all)
+-              opt_skb = skb_clone(skb, GFP_ATOMIC);
++              opt_skb = skb_clone_and_charge_r(skb, sk);
+ 
+       if (sk->sk_state == DCCP_OPEN) { /* Fast path */
+               if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
+@@ -679,7 +677,6 @@ ipv6_pktoptions:
+                       np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
+               if (ipv6_opt_accepted(sk, opt_skb,
+                                     &DCCP_SKB_CB(opt_skb)->header.h6)) {
+-                      skb_set_owner_r(opt_skb, sk);
+                       memmove(IP6CB(opt_skb),
+                               &DCCP_SKB_CB(opt_skb)->header.h6,
+                               sizeof(struct inet6_skb_parm));
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index a86140ff093cb..29ec42c1f5d09 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -1070,6 +1070,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
+        * It is OK, because this socket enters to hash table only
+        * after validation is complete.
+        */
++      err = -EADDRINUSE;
+       inet_sk_state_store(sk, TCP_LISTEN);
+       if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
+               inet->inet_sport = htons(inet->inet_num);
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index f4559e5bc84bf..a30ff5d6808aa 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -51,7 +51,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, 
struct sock *sk)
+       fl6->flowi6_mark = sk->sk_mark;
+       fl6->fl6_dport = inet->inet_dport;
+       fl6->fl6_sport = inet->inet_sport;
+-      fl6->flowlabel = np->flow_label;
++      fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
+       fl6->flowi6_uid = sk->sk_uid;
+ 
+       if (!fl6->flowi6_oif)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 93b3e7c247cec..3f331455f0202 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -269,6 +269,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr 
*uaddr,
+       fl6.flowi6_proto = IPPROTO_TCP;
+       fl6.daddr = sk->sk_v6_daddr;
+       fl6.saddr = saddr ? *saddr : np->saddr;
++      fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
+       fl6.flowi6_oif = sk->sk_bound_dev_if;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_dport = usin->sin6_port;
+@@ -1429,14 +1430,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct 
sock *sk, struct sk_buff *
+ 
+               /* Clone pktoptions received with SYN, if we own the req */
+               if (ireq->pktopts) {
+-                      newnp->pktoptions = skb_clone(ireq->pktopts,
+-                                                    sk_gfp_mask(sk, 
GFP_ATOMIC));
++                      newnp->pktoptions = 
skb_clone_and_charge_r(ireq->pktopts, newsk);
+                       consume_skb(ireq->pktopts);
+                       ireq->pktopts = NULL;
+-                      if (newnp->pktoptions) {
++                      if (newnp->pktoptions)
+                               tcp_v6_restore_cb(newnp->pktoptions);
+-                              skb_set_owner_r(newnp->pktoptions, newsk);
+-                      }
+               }
+       } else {
+               if (!req_unhash && found_dup_sk) {
+@@ -1506,7 +1504,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff 
*skb)
+                                              --ANK (980728)
+        */
+       if (np->rxopt.all)
+-              opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
++              opt_skb = skb_clone_and_charge_r(skb, sk);
+ 
+       if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+               struct dst_entry *dst;
+@@ -1590,7 +1588,6 @@ ipv6_pktoptions:
+               if (np->repflow)
+                       np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
+               if (ipv6_opt_accepted(sk, opt_skb, 
&TCP_SKB_CB(opt_skb)->header.h6)) {
+-                      skb_set_owner_r(opt_skb, sk);
+                       tcp_v6_restore_cb(opt_skb);
+                       opt_skb = xchg(&np->pktoptions, opt_skb);
+               } else {
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 58a7075084d17..e69bed96811b5 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1428,6 +1428,7 @@ static int mpls_dev_sysctl_register(struct net_device 
*dev,
+ free:
+       kfree(table);
+ out:
++      mdev->sysctl = NULL;
+       return -ENOBUFS;
+ }
+ 
+@@ -1437,6 +1438,9 @@ static void mpls_dev_sysctl_unregister(struct net_device 
*dev,
+       struct net *net = dev_net(dev);
+       struct ctl_table *table;
+ 
++      if (!mdev->sysctl)
++              return;
++
+       table = mdev->sysctl->ctl_table_arg;
+       unregister_net_sysctl_table(mdev->sysctl);
+       kfree(table);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 2b1b40199c617..3a1e8f2388665 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -891,8 +891,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock 
*sk,
+ {
+       int addrlen = sizeof(struct sockaddr_in);
+       struct sockaddr_storage addr;
+-      struct mptcp_sock *msk;
+       struct socket *ssock;
++      struct sock *newsk;
+       int backlog = 1024;
+       int err;
+ 
+@@ -901,13 +901,15 @@ static int mptcp_pm_nl_create_listen_socket(struct sock 
*sk,
+       if (err)
+               return err;
+ 
+-      msk = mptcp_sk(entry->lsk->sk);
+-      if (!msk) {
++      newsk = entry->lsk->sk;
++      if (!newsk) {
+               err = -EINVAL;
+               goto out;
+       }
+ 
+-      ssock = __mptcp_nmpc_socket(msk);
++      lock_sock(newsk);
++      ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
++      release_sock(newsk);
+       if (!ssock) {
+               err = -EINVAL;
+               goto out;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 47f359dac247b..5d05d85242bc6 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2726,6 +2726,7 @@ static void mptcp_close(struct sock *sk, long timeout)
+ {
+       struct mptcp_subflow_context *subflow;
+       bool do_cancel_work = false;
++      int subflows_alive = 0;
+ 
+       lock_sock(sk);
+       sk->sk_shutdown = SHUTDOWN_MASK;
+@@ -2747,11 +2748,19 @@ cleanup:
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow = lock_sock_fast_nested(ssk);
+ 
++              subflows_alive += ssk->sk_state != TCP_CLOSE;
++
+               sock_orphan(ssk);
+               unlock_sock_fast(ssk, slow);
+       }
+       sock_orphan(sk);
+ 
++      /* all the subflows are closed, only timeout can change the msk
++       * state, let's not keep resources busy for no reasons
++       */
++      if (subflows_alive == 0)
++              inet_sk_state_store(sk, TCP_CLOSE);
++
+       sock_hold(sk);
+       pr_debug("msk=%p state=%d", sk, sk->sk_state);
+       if (sk->sk_state == TCP_CLOSE) {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 15dbaa202c7cf..b0e9548f00bf1 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1570,7 +1570,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct 
socket **new_sock)
+       if (err)
+               return err;
+ 
+-      lock_sock(sf->sk);
++      lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
+ 
+       /* the newly created socket has to be in the same cgroup as its parent 
*/
+       mptcp_attach_cgroup(sk, sf->sk);
+diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
+index b5b09a902c7ac..9fea90ed79d44 100644
+--- a/net/netfilter/nft_tproxy.c
++++ b/net/netfilter/nft_tproxy.c
+@@ -312,6 +312,13 @@ static int nft_tproxy_dump(struct sk_buff *skb,
+       return 0;
+ }
+ 
++static int nft_tproxy_validate(const struct nft_ctx *ctx,
++                             const struct nft_expr *expr,
++                             const struct nft_data **data)
++{
++      return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
++}
++
+ static struct nft_expr_type nft_tproxy_type;
+ static const struct nft_expr_ops nft_tproxy_ops = {
+       .type           = &nft_tproxy_type,
+@@ -320,6 +327,7 @@ static const struct nft_expr_ops nft_tproxy_ops = {
+       .init           = nft_tproxy_init,
+       .destroy        = nft_tproxy_destroy,
+       .dump           = nft_tproxy_dump,
++      .validate       = nft_tproxy_validate,
+ };
+ 
+ static struct nft_expr_type nft_tproxy_type __read_mostly = {
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index 896b8f5bc8853..67b471c666c7e 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -450,7 +450,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct 
genl_info *info)
+ 
+       err = attach_meter(meter_tbl, meter);
+       if (err)
+-              goto exit_unlock;
++              goto exit_free_old_meter;
+ 
+       ovs_unlock();
+ 
+@@ -473,6 +473,8 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct 
genl_info *info)
+       genlmsg_end(reply, ovs_reply_header);
+       return genlmsg_reply(reply, info);
+ 
++exit_free_old_meter:
++      ovs_meter_free(old_meter);
+ exit_unlock:
+       ovs_unlock();
+       nlmsg_free(reply);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 29a208ed8fb88..86c93cf1744b0 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -487,6 +487,12 @@ static int rose_listen(struct socket *sock, int backlog)
+ {
+       struct sock *sk = sock->sk;
+ 
++      lock_sock(sk);
++      if (sock->state != SS_UNCONNECTED) {
++              release_sock(sk);
++              return -EINVAL;
++      }
++
+       if (sk->sk_state != TCP_LISTEN) {
+               struct rose_sock *rose = rose_sk(sk);
+ 
+@@ -496,8 +502,10 @@ static int rose_listen(struct socket *sock, int backlog)
+               memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
+               sk->sk_max_ack_backlog = backlog;
+               sk->sk_state           = TCP_LISTEN;
++              release_sock(sk);
+               return 0;
+       }
++      release_sock(sk);
+ 
+       return -EOPNOTSUPP;
+ }
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index 5c36013339e11..2a05bad56ef3e 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -305,7 +305,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr 
*nla,
+       ret = tcf_idr_check_alloc(tn, &index, act, bind);
+       if (!ret) {
+               ret = tcf_idr_create(tn, index, est, act,
+-                                   &act_bpf_ops, bind, true, 0);
++                                   &act_bpf_ops, bind, true, flags);
+               if (ret < 0) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
+index 032ef927d0ebb..0deb4e96a6c2e 100644
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct 
nlattr *nla,
+       ret = tcf_idr_check_alloc(tn, &index, a, bind);
+       if (!ret) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_connmark_ops, bind, false, 0);
++                                   &act_connmark_ops, bind, false, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
+index 2d75fe1223ac4..56e0a5eb64942 100644
+--- a/net/sched/act_ctinfo.c
++++ b/net/sched/act_ctinfo.c
+@@ -92,7 +92,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct 
tc_action *a,
+       cp = rcu_dereference_bh(ca->params);
+ 
+       tcf_lastuse_update(&ca->tcf_tm);
+-      bstats_update(&ca->tcf_bstats, skb);
++      tcf_action_update_bstats(&ca->common, skb);
+       action = READ_ONCE(ca->tcf_action);
+ 
+       wlen = skb_network_offset(skb);
+@@ -211,8 +211,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr 
*nla,
+       index = actparm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
+       if (!err) {
+-              ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_ctinfo_ops, bind, false, 0);
++              ret = tcf_idr_create_from_flags(tn, index, est, a,
++                                              &act_ctinfo_ops, bind, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
+index 7df72a4197a3f..ac985c53ebafe 100644
+--- a/net/sched/act_gate.c
++++ b/net/sched/act_gate.c
+@@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!err) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_gate_ops, bind, false, 0);
++                                   &act_gate_ops, bind, false, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 7064a365a1a98..ec987ec758070 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
+-                                   bind, true, 0);
++                                   bind, true, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       kfree(p);
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 265b1443e252f..2f3d507c24a1f 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -145,7 +145,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int 
id, struct nlattr *nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a, ops, bind,
+-                                   false, 0);
++                                   false, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index db0ef0486309b..980ad795727e9 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -254,7 +254,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_mpls_ops, bind, true, 0);
++                                   &act_mpls_ops, bind, true, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
+index 7dd6b586ba7f6..2a39b3729e844 100644
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, 
struct nlattr *est,
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
+       if (!err) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_nat_ops, bind, false, 0);
++                                   &act_nat_ops, bind, false, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 1262a84b725fc..4f72e6e7dbda5 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr 
*nla,
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
+       if (!err) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_pedit_ops, bind, false, 0);
++                                   &act_pedit_ops, bind, false, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       goto out_free;
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c
+index 5c0a3ea9fe120..d44b933b821d7 100644
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -90,7 +90,7 @@ static int tcf_police_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, NULL, a,
+-                                   &act_police_ops, bind, true, 0);
++                                   &act_police_ops, bind, true, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 230501eb9e069..ab4ae24ab886f 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -70,7 +70,7 @@ static int tcf_sample_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_sample_ops, bind, true, 0);
++                                   &act_sample_ops, bind, true, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
+index cbbe1861d3a20..7885271540259 100644
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -128,7 +128,7 @@ static int tcf_simp_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_simp_ops, bind, false, 0);
++                                   &act_simp_ops, bind, false, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 6054185383474..6088ceaf582e8 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_skbedit_ops, bind, true, 0);
++                                   &act_skbedit_ops, bind, true, act_flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index ecb9ee6660954..ee9cc0abf9e10 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -168,7 +168,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr 
*nla,
+ 
+       if (!exists) {
+               ret = tcf_idr_create(tn, index, est, a,
+-                                   &act_skbmod_ops, bind, true, 0);
++                                   &act_skbmod_ops, bind, true, flags);
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
+                       return ret;
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 8d1ef858db87d..54c5ff207fb1b 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -12,6 +12,7 @@
+ #include <linux/errno.h>
+ #include <linux/slab.h>
+ #include <linux/refcount.h>
++#include <linux/rcupdate.h>
+ #include <net/act_api.h>
+ #include <net/netlink.h>
+ #include <net/pkt_cls.h>
+@@ -338,6 +339,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, 
unsigned long base,
+       struct tcf_result cr = {};
+       int err, balloc = 0;
+       struct tcf_exts e;
++      bool update_h = false;
+ 
+       err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+       if (err < 0)
+@@ -455,10 +457,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, 
unsigned long base,
+               }
+       }
+ 
+-      if (cp->perfect)
++      if (cp->perfect) {
+               r = cp->perfect + handle;
+-      else
+-              r = tcindex_lookup(cp, handle) ? : &new_filter_result;
++      } else {
++              /* imperfect area is updated in-place using rcu */
++              update_h = !!tcindex_lookup(cp, handle);
++              r = &new_filter_result;
++      }
+ 
+       if (r == &new_filter_result) {
+               f = kzalloc(sizeof(*f), GFP_KERNEL);
+@@ -484,7 +489,28 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, 
unsigned long base,
+ 
+       rcu_assign_pointer(tp->root, cp);
+ 
+-      if (r == &new_filter_result) {
++      if (update_h) {
++              struct tcindex_filter __rcu **fp;
++              struct tcindex_filter *cf;
++
++              f->result.res = r->res;
++              tcf_exts_change(&f->result.exts, &r->exts);
++
++              /* imperfect area bucket */
++              fp = cp->h + (handle % cp->hash);
++
++              /* lookup the filter, guaranteed to exist */
++              for (cf = rcu_dereference_bh_rtnl(*fp); cf;
++                   fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
++                      if (cf->key == (u16)handle)
++                              break;
++
++              f->next = cf->next;
++
++              cf = rcu_replace_pointer(*fp, f, 1);
++              tcf_exts_get_net(&cf->result.exts);
++              tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
++      } else if (r == &new_filter_result) {
+               struct tcindex_filter *nfp;
+               struct tcindex_filter __rcu **fp;
+ 
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 45b92e40082ef..8ce999e4ca323 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -427,7 +427,10 @@ static void htb_activate_prios(struct htb_sched *q, 
struct htb_class *cl)
+       while (cl->cmode == HTB_MAY_BORROW && p && mask) {
+               m = mask;
+               while (m) {
+-                      int prio = ffz(~m);
++                      unsigned int prio = ffz(~m);
++
++                      if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
++                              break;
+                       m &= ~(1 << prio);
+ 
+                       if (p->inner.clprio[prio].feed.rb_node)
+diff --git a/net/sctp/diag.c b/net/sctp/diag.c
+index d9c6d8f30f093..b0ce1080842d4 100644
+--- a/net/sctp/diag.c
++++ b/net/sctp/diag.c
+@@ -343,11 +343,9 @@ static int sctp_sock_filter(struct sctp_endpoint *ep, 
struct sctp_transport *tsp
+       struct sctp_comm_param *commp = p;
+       struct sock *sk = ep->base.sk;
+       const struct inet_diag_req_v2 *r = commp->r;
+-      struct sctp_association *assoc =
+-              list_entry(ep->asocs.next, struct sctp_association, asocs);
+ 
+       /* find the ep only once through the transports by this condition */
+-      if (tsp->asoc != assoc)
++      if (!list_is_first(&tsp->asoc->asocs, &ep->asocs))
+               return 0;
+ 
+       if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 7af2515735957..8e35009ec25cb 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -144,6 +144,7 @@ static int hda_codec_driver_probe(struct device *dev)
+ 
+  error:
+       snd_hda_codec_cleanup_for_unbind(codec);
++      codec->preset = NULL;
+       return err;
+ }
+ 
+@@ -166,6 +167,7 @@ static int hda_codec_driver_remove(struct device *dev)
+       if (codec->patch_ops.free)
+               codec->patch_ops.free(codec);
+       snd_hda_codec_cleanup_for_unbind(codec);
++      codec->preset = NULL;
+       module_put(dev->driver->owner);
+       return 0;
+ }
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index f552785d301e0..19be60bb57810 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -791,7 +791,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec 
*codec)
+       snd_array_free(&codec->cvt_setups);
+       snd_array_free(&codec->spdif_out);
+       snd_array_free(&codec->verbs);
+-      codec->preset = NULL;
+       codec->follower_dig_outs = NULL;
+       codec->spdif_status_reset = 0;
+       snd_array_free(&codec->mixers);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 2bc9274e0960b..9d6464ded63e5 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1124,6 +1124,7 @@ static const struct hda_device_id snd_hda_id_conexant[] 
= {
+       HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
++      HDA_CODEC_ENTRY(0x14f120d1, "SN6180", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 83c69d1754930..dddb6f842ff29 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -826,7 +826,7 @@ do_sku:
+                       alc_setup_gpio(codec, 0x02);
+                       break;
+               case 7:
+-                      alc_setup_gpio(codec, 0x03);
++                      alc_setup_gpio(codec, 0x04);
+                       break;
+               case 5:
+               default:
+diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
+index b39c25409c239..f0af8c18e5efa 100644
+--- a/sound/soc/codecs/cs42l56.c
++++ b/sound/soc/codecs/cs42l56.c
+@@ -1193,18 +1193,12 @@ static int cs42l56_i2c_probe(struct i2c_client 
*i2c_client,
+       if (pdata) {
+               cs42l56->pdata = *pdata;
+       } else {
+-              pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+-                                   GFP_KERNEL);
+-              if (!pdata)
+-                      return -ENOMEM;
+-
+               if (i2c_client->dev.of_node) {
+                       ret = cs42l56_handle_of_data(i2c_client,
+                                                    &cs42l56->pdata);
+                       if (ret != 0)
+                               return ret;
+               }
+-              cs42l56->pdata = *pdata;
+       }
+ 
+       if (cs42l56->pdata.gpio_nreset) {
+diff --git a/sound/soc/intel/boards/sof_cs42l42.c 
b/sound/soc/intel/boards/sof_cs42l42.c
+index ce78c18798876..8061082d9fbf3 100644
+--- a/sound/soc/intel/boards/sof_cs42l42.c
++++ b/sound/soc/intel/boards/sof_cs42l42.c
+@@ -311,6 +311,9 @@ static int create_spk_amp_dai_links(struct device *dev,
+       links[*id].platforms = platform_component;
+       links[*id].num_platforms = ARRAY_SIZE(platform_component);
+       links[*id].dpcm_playback = 1;
++      /* firmware-generated echo reference */
++      links[*id].dpcm_capture = 1;
++
+       links[*id].no_pcm = 1;
+       links[*id].cpus = &cpus[*id];
+       links[*id].num_cpus = 1;
+diff --git a/sound/soc/intel/boards/sof_rt5682.c 
b/sound/soc/intel/boards/sof_rt5682.c
+index f096bd6d69be7..d0ce2f06b30c6 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -737,8 +737,6 @@ static struct snd_soc_dai_link 
*sof_card_dai_links_create(struct device *dev,
+                       links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+                       links[id].init = max_98373_spk_codec_init;
+                       links[id].ops = &max_98373_ops;
+-                      /* feedback stream */
+-                      links[id].dpcm_capture = 1;
+               } else if (sof_rt5682_quirk &
+                               SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
+                       max_98360a_dai_link(&links[id]);
+@@ -751,6 +749,9 @@ static struct snd_soc_dai_link 
*sof_card_dai_links_create(struct device *dev,
+               links[id].platforms = platform_component;
+               links[id].num_platforms = ARRAY_SIZE(platform_component);
+               links[id].dpcm_playback = 1;
++              /* feedback stream or firmware-generated echo reference */
++              links[id].dpcm_capture = 1;
++
+               links[id].no_pcm = 1;
+               links[id].cpus = &cpus[id];
+               links[id].num_cpus = 1;
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 5f355b8d57a09..56653d78d2200 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -212,6 +212,10 @@ static int hda_link_hw_params(struct snd_pcm_substream 
*substream,
+       int stream_tag;
+       int ret;
+ 
++      link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
++      if (!link)
++              return -EINVAL;
++
+       /* get stored dma data if resuming from system suspend */
+       link_dev = snd_soc_dai_get_dma_data(dai, substream);
+       if (!link_dev) {
+@@ -232,10 +236,6 @@ static int hda_link_hw_params(struct snd_pcm_substream 
*substream,
+       if (ret < 0)
+               return ret;
+ 
+-      link = snd_hdac_ext_bus_get_link(bus, codec_dai->component->name);
+-      if (!link)
+-              return -EINVAL;
+-
+       /* set the hdac_stream in the codec dai */
+       snd_soc_dai_set_stream(codec_dai, hdac_stream(link_dev), 
substream->stream);
+ 
+diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c 
b/tools/testing/selftests/bpf/verifier/search_pruning.c
+index 7e50cb80873a5..7e36078f8f482 100644
+--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
+@@ -154,3 +154,39 @@
+       .result_unpriv = ACCEPT,
+       .insn_processed = 15,
+ },
++/* The test performs a conditional 64-bit write to a stack location
++ * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
++ * then data is read from fp[-8]. This sequence is unsafe.
++ *
++ * The test would be mistakenly marked as safe w/o dst register parent
++ * preservation in verifier.c:copy_register_state() function.
++ *
++ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
++ * checkpoint state after conditional 64-bit assignment.
++ */
++{
++      "write tracking and register parent chain bug",
++      .insns = {
++      /* r6 = ktime_get_ns() */
++      BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
++      /* r0 = ktime_get_ns() */
++      BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++      /* if r0 > r6 goto +1 */
++      BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
++      /* *(u64 *)(r10 - 8) = 0xdeadbeef */
++      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
++      /* r1 = 42 */
++      BPF_MOV64_IMM(BPF_REG_1, 42),
++      /* *(u8 *)(r10 - 8) = r1 */
++      BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
++      /* r2 = *(u64 *)(r10 - 8) */
++      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
++      /* exit(0) */
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      },
++      .flags = BPF_F_TEST_STATE_FREQ,
++      .errstr = "invalid read from stack off -8+1 size 8",
++      .result = REJECT,
++},
+diff --git a/tools/testing/selftests/lkdtm/stack-entropy.sh 
b/tools/testing/selftests/lkdtm/stack-entropy.sh
+index 1b4d95d575f85..14fedeef762ed 100755
+--- a/tools/testing/selftests/lkdtm/stack-entropy.sh
++++ b/tools/testing/selftests/lkdtm/stack-entropy.sh
+@@ -4,13 +4,27 @@
+ # Measure kernel stack entropy by sampling via LKDTM's REPORT_STACK test.
+ set -e
+ samples="${1:-1000}"
++TRIGGER=/sys/kernel/debug/provoke-crash/DIRECT
++KSELFTEST_SKIP_TEST=4
++
++# Verify we have LKDTM available in the kernel.
++if [ ! -r $TRIGGER ] ; then
++      /sbin/modprobe -q lkdtm || true
++      if [ ! -r $TRIGGER ] ; then
++              echo "Cannot find $TRIGGER (missing CONFIG_LKDTM?)"
++      else
++              echo "Cannot write $TRIGGER (need to run as root?)"
++      fi
++      # Skip this test
++      exit $KSELFTEST_SKIP_TEST
++fi
+ 
+ # Capture dmesg continuously since it may fill up depending on sample size.
+ log=$(mktemp -t stack-entropy-XXXXXX)
+ dmesg --follow >"$log" & pid=$!
+ report=-1
+ for i in $(seq 1 $samples); do
+-        echo "REPORT_STACK" >/sys/kernel/debug/provoke-crash/DIRECT
++        echo "REPORT_STACK" > $TRIGGER
+       if [ -t 1 ]; then
+               percent=$(( 100 * $i / $samples ))
+               if [ "$percent" -ne "$report" ]; then
+diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
+index 813baf13f62a2..51a919083d9b8 100644
+--- a/tools/virtio/linux/bug.h
++++ b/tools/virtio/linux/bug.h
+@@ -1,13 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef BUG_H
+-#define BUG_H
++#ifndef _LINUX_BUG_H
++#define _LINUX_BUG_H
+ 
+ #include <asm/bug.h>
+ 
+ #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
+ 
+-#define BUILD_BUG_ON(x)
+-
+ #define BUG() abort()
+ 
+-#endif /* BUG_H */
++#endif /* _LINUX_BUG_H */
+diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h
+new file mode 100644
+index 0000000000000..cdbb75e28a604
+--- /dev/null
++++ b/tools/virtio/linux/build_bug.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_BUILD_BUG_H
++#define _LINUX_BUILD_BUG_H
++
++#define BUILD_BUG_ON(x)
++
++#endif        /* _LINUX_BUILD_BUG_H */
+diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h
+new file mode 100644
+index 0000000000000..307da69d6b26c
+--- /dev/null
++++ b/tools/virtio/linux/cpumask.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_CPUMASK_H
++#define _LINUX_CPUMASK_H
++
++#include <linux/kernel.h>
++
++#endif /* _LINUX_CPUMASK_H */
+diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h
+new file mode 100644
+index 0000000000000..43d146f236f14
+--- /dev/null
++++ b/tools/virtio/linux/gfp.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_GFP_H
++#define __LINUX_GFP_H
++
++#include <linux/topology.h>
++
++#endif
+diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
+index 0b493542e61a6..a4beb719d2174 100644
+--- a/tools/virtio/linux/kernel.h
++++ b/tools/virtio/linux/kernel.h
+@@ -10,6 +10,7 @@
+ #include <stdarg.h>
+ 
+ #include <linux/compiler.h>
++#include <linux/log2.h>
+ #include <linux/types.h>
+ #include <linux/overflow.h>
+ #include <linux/list.h>
+diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
+new file mode 100644
+index 0000000000000..272b5aa285d5a
+--- /dev/null
++++ b/tools/virtio/linux/kmsan.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_KMSAN_H
++#define _LINUX_KMSAN_H
++
++#include <linux/gfp.h>
++
++inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
++                           enum dma_data_direction dir)
++{
++}
++
++#endif /* _LINUX_KMSAN_H */
+diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
+index 369ee308b6686..74d9e1825748e 100644
+--- a/tools/virtio/linux/scatterlist.h
++++ b/tools/virtio/linux/scatterlist.h
+@@ -2,6 +2,7 @@
+ #ifndef SCATTERLIST_H
+ #define SCATTERLIST_H
+ #include <linux/kernel.h>
++#include <linux/bug.h>
+ 
+ struct scatterlist {
+       unsigned long   page_link;
+diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h
+new file mode 100644
+index 0000000000000..910794afb993a
+--- /dev/null
++++ b/tools/virtio/linux/topology.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_TOPOLOGY_H
++#define _LINUX_TOPOLOGY_H
++
++#include <linux/cpumask.h>
++
++#endif /* _LINUX_TOPOLOGY_H */
