commit:     36635be3a2dfb97f52a0c05f27c9fedcb70e08e3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 11 11:49:17 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul 11 11:49:17 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=36635be3

Linux patch 6.1.98

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1097_linux-6.1.98.patch | 3629 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3633 insertions(+)

diff --git a/0000_README b/0000_README
index 13913ad0..67ef8e56 100644
--- a/0000_README
+++ b/0000_README
@@ -431,6 +431,10 @@ Patch:  1096_linux-6.1.97.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.97
 
+Patch:  1097_linux-6.1.98.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.98
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1097_linux-6.1.98.patch b/1097_linux-6.1.98.patch
new file mode 100644
index 00000000..ed79fc38
--- /dev/null
+++ b/1097_linux-6.1.98.patch
@@ -0,0 +1,3629 @@
+diff --git a/Makefile b/Makefile
+index c2148be2ba340..c9a630cdb2ec2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 97
++SUBLEVEL = 98
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+index b276eb0810c70..0ab611e9fbb7c 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+@@ -290,7 +290,7 @@ vdd_gpu: DCDC_REG2 {
+                               regulator-name = "vdd_gpu";
+                               regulator-always-on;
+                               regulator-boot-on;
+-                              regulator-min-microvolt = <900000>;
++                              regulator-min-microvolt = <500000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-init-microvolt = <900000>;
+                               regulator-ramp-delay = <6001>;
+diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
+index 6d8492b6e2b83..4999de47b4a38 100644
+--- a/arch/powerpc/include/asm/interrupt.h
++++ b/arch/powerpc/include/asm/interrupt.h
+@@ -355,6 +355,14 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
+       if (IS_ENABLED(CONFIG_KASAN))
+               return;
+ 
++      /*
++       * Likewise, do not use it in real mode if percpu first chunk is not
++       * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
++       * are chances where percpu allocation can come from vmalloc area.
++       */
++      if (percpu_first_chunk_is_paged)
++              return;
++
+       /* Otherwise, it should be safe to call it */
+       nmi_enter();
+ }
+@@ -370,6 +378,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
+               // no nmi_exit for a pseries hash guest taking a real mode exception
+       } else if (IS_ENABLED(CONFIG_KASAN)) {
+               // no nmi_exit for KASAN in real mode
++      } else if (percpu_first_chunk_is_paged) {
++              // no nmi_exit if percpu first chunk is not embedded
+       } else {
+               nmi_exit();
+       }
+diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
+index 0e1745e5125b0..6d3ce049babdf 100644
+--- a/arch/powerpc/include/asm/io.h
++++ b/arch/powerpc/include/asm/io.h
+@@ -42,7 +42,7 @@ extern struct pci_dev *isa_bridge_pcidev;
+  * define properly based on the platform
+  */
+ #ifndef CONFIG_PCI
+-#define _IO_BASE      0
++#define _IO_BASE      POISON_POINTER_DELTA
+ #define _ISA_MEM_BASE 0
+ #define PCI_DRAM_OFFSET 0
+ #elif defined(CONFIG_PPC32)
+diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
+index 8e5b7d0b851c6..634970ce13c6b 100644
+--- a/arch/powerpc/include/asm/percpu.h
++++ b/arch/powerpc/include/asm/percpu.h
+@@ -15,6 +15,16 @@
+ #endif /* CONFIG_SMP */
+ #endif /* __powerpc64__ */
+ 
++#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
++#include <linux/jump_label.h>
++DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
++
++#define percpu_first_chunk_is_paged   \
++              (static_key_enabled(&__percpu_first_chunk_is_paged.key))
++#else
++#define percpu_first_chunk_is_paged   false
++#endif /* CONFIG_PPC64 && CONFIG_SMP */
++
+ #include <asm-generic/percpu.h>
+ 
+ #include <asm/paca.h>
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index b2e0d3ce4261c..7662265f24337 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -834,6 +834,7 @@ static __init int pcpu_cpu_to_node(int cpu)
+ 
+ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(__per_cpu_offset);
++DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
+ 
+ void __init setup_per_cpu_areas(void)
+ {
+@@ -876,6 +877,7 @@ void __init setup_per_cpu_areas(void)
+       if (rc < 0)
+               panic("cannot initialize percpu area (err=%d)", rc);
+ 
++      static_key_enable(&__percpu_first_chunk_is_paged.key);
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
+                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
+index a79e28c91e2be..e465e44877376 100644
+--- a/arch/powerpc/kexec/core_64.c
++++ b/arch/powerpc/kexec/core_64.c
+@@ -26,6 +26,7 @@
+ #include <asm/paca.h>
+ #include <asm/mmu.h>
+ #include <asm/sections.h>     /* _end */
++#include <asm/setup.h>
+ #include <asm/smp.h>
+ #include <asm/hw_breakpoint.h>
+ #include <asm/svm.h>
+@@ -316,6 +317,16 @@ void default_machine_kexec(struct kimage *image)
+       if (!kdump_in_progress())
+               kexec_prepare_cpus();
+ 
++#ifdef CONFIG_PPC_PSERIES
++      /*
++       * This must be done after other CPUs have shut down, otherwise they
++       * could execute the 'scv' instruction, which is not supported with
++       * reloc disabled (see configure_exceptions()).
++       */
++      if (firmware_has_feature(FW_FEATURE_SET_MODE))
++              pseries_disable_reloc_on_exc();
++#endif
++
+       printk("kexec: Starting switchover sequence.\n");
+ 
+       /* switch to a staticly allocated stack.  Based on irq stack code.
+diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
+index 096d09ed89f67..431be156ca9bb 100644
+--- a/arch/powerpc/platforms/pseries/kexec.c
++++ b/arch/powerpc/platforms/pseries/kexec.c
+@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+       } else
+               xics_kexec_teardown_cpu(secondary);
+ }
+-
+-void pseries_machine_kexec(struct kimage *image)
+-{
+-      if (firmware_has_feature(FW_FEATURE_SET_MODE))
+-              pseries_disable_reloc_on_exc();
+-
+-      default_machine_kexec(image);
+-}
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index 1d75b7742ef00..3f9b51298aa34 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
+ #endif
+ 
+ extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+-void pseries_machine_kexec(struct kimage *image);
+ 
+ extern void pSeries_final_fixup(void);
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index a0701dbdb1348..df07726192000 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -1149,7 +1149,6 @@ define_machine(pseries) {
+       .machine_check_exception = pSeries_machine_check_exception,
+       .machine_check_log_err  = pSeries_machine_check_log_err,
+ #ifdef CONFIG_KEXEC_CORE
+-      .machine_kexec          = pseries_machine_kexec,
+       .kexec_cpu_down         = pseries_kexec_cpu_down,
+ #endif
+ #ifdef CONFIG_MEMORY_HOTPLUG
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index cd692f399cd18..72307168d38ac 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1354,7 +1354,7 @@ static int cpu_cmd(void)
+       }
+       termch = cpu;
+ 
+-      if (!scanhex(&cpu)) {
++      if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
+               /* print cpus waiting or in xmon */
+               printf("cpus stopped:");
+               last_cpu = first_cpu = NR_CPUS;
+@@ -2776,7 +2776,7 @@ static void dump_pacas(void)
+ 
+       termch = c;     /* Put c back, it wasn't 'a' */
+ 
+-      if (scanhex(&num))
++      if (scanhex(&num) && num < num_possible_cpus())
+               dump_one_paca(num);
+       else
+               dump_one_paca(xmon_owner);
+@@ -2849,7 +2849,7 @@ static void dump_xives(void)
+ 
+       termch = c;     /* Put c back, it wasn't 'a' */
+ 
+-      if (scanhex(&num))
++      if (scanhex(&num) && num < num_possible_cpus())
+               dump_one_xive(num);
+       else
+               dump_one_xive(xmon_owner);
+diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
+index 2d139b724bc84..ccb0c5d5c63c4 100644
+--- a/arch/riscv/kernel/machine_kexec.c
++++ b/arch/riscv/kernel/machine_kexec.c
+@@ -147,20 +147,12 @@ static void machine_kexec_mask_interrupts(void)
+ 
+       for_each_irq_desc(i, desc) {
+               struct irq_chip *chip;
+-              int ret;
+ 
+               chip = irq_desc_get_chip(desc);
+               if (!chip)
+                       continue;
+ 
+-              /*
+-               * First try to remove the active state. If this
+-               * fails, try to EOI the interrupt.
+-               */
+-              ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+-
+-              if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+-                  chip->irq_eoi)
++              if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+                       chip->irq_eoi(&desc->irq_data);
+ 
+               if (chip->irq_mask)
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index 09abf000359f8..0ef662fbade0d 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
+       u64 instruction_io_other;
+       u64 instruction_lpsw;
+       u64 instruction_lpswe;
++      u64 instruction_lpswey;
+       u64 instruction_pfmf;
+       u64 instruction_ptff;
+       u64 instruction_sck;
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 348d49268a7ec..e6606ff91921a 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+       STATS_DESC_COUNTER(VCPU, instruction_io_other),
+       STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+       STATS_DESC_COUNTER(VCPU, instruction_lpswe),
++      STATS_DESC_COUNTER(VCPU, instruction_lpswey),
+       STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+       STATS_DESC_COUNTER(VCPU, instruction_ptff),
+       STATS_DESC_COUNTER(VCPU, instruction_sck),
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 4755492dfabc6..bb8d1a050b669 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -119,6 +119,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
+       return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
+ }
+ 
++static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
++{
++      u32 base1 = vcpu->arch.sie_block->ipb >> 28;
++      s64 disp1;
++
++      /* The displacement is a 20bit _SIGNED_ value */
++      disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
++                            ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
++
++      if (ar)
++              *ar = base1;
++
++      return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
++}
++
+ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
+                                             u64 *address1, u64 *address2,
+                                             u8 *ar_b1, u8 *ar_b2)
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 3335fa09b6f1d..9af826d093efc 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -794,6 +794,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
+       return 0;
+ }
+ 
++static int handle_lpswey(struct kvm_vcpu *vcpu)
++{
++      psw_t new_psw;
++      u64 addr;
++      int rc;
++      u8 ar;
++
++      vcpu->stat.instruction_lpswey++;
++
++      if (!test_kvm_facility(vcpu->kvm, 193))
++              return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
++
++      if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
++              return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
++
++      addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
++      if (addr & 7)
++              return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++      rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
++      if (rc)
++              return kvm_s390_inject_prog_cond(vcpu, rc);
++
++      vcpu->arch.sie_block->gpsw = new_psw;
++      if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
++              return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
++      return 0;
++}
++
+ static int handle_stidp(struct kvm_vcpu *vcpu)
+ {
+       u64 stidp_data = vcpu->kvm->arch.model.cpuid;
+@@ -1460,6 +1490,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
+       case 0x61:
+       case 0x62:
+               return handle_ri(vcpu);
++      case 0x71:
++              return handle_lpswey(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 16991095270d2..c4ece86c45bc4 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -35,8 +35,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
+       alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+       memcpy(alignbuffer, key, keylen);
+       ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
+-      memset(alignbuffer, 0, keylen);
+-      kfree(buffer);
++      kfree_sensitive(buffer);
+       return ret;
+ }
+ 
+diff --git a/crypto/cipher.c b/crypto/cipher.c
+index b47141ed4a9f3..395f0c2fbb9ff 100644
+--- a/crypto/cipher.c
++++ b/crypto/cipher.c
+@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
+       alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+       memcpy(alignbuffer, key, keylen);
+       ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
+-      memset(alignbuffer, 0, keylen);
+-      kfree(buffer);
++      kfree_sensitive(buffer);
+       return ret;
+ 
+ }
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index 3ec611dc0c09f..a905e955bbfc7 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -350,7 +350,8 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ 
+               if (quirks->max_write_len &&
+                   (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+-                      max_write = quirks->max_write_len;
++                      max_write = quirks->max_write_len -
++                              (config->reg_bits + config->pad_bits) / BITS_PER_BYTE;
+ 
+               if (max_read || max_write) {
+                       ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index b0264b3df6f3d..206c2a7a5100e 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -83,6 +83,17 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+               return -EINVAL;
+       }
+ 
++      /*
++       * If a smaller zone capacity was requested, do not allow a smaller last
++       * zone at the same time as such zone configuration does not correspond
++       * to any real zoned device.
++       */
++      if (dev->zone_capacity != dev->zone_size &&
++          dev->size & (dev->zone_size - 1)) {
++              pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
++              return -EINVAL;
++      }
++
+       zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+       dev_capacity_sects = mb_to_sects(dev->size);
+       dev->zone_size_sects = mb_to_sects(dev->zone_size);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 070014d0fc994..4fddb4666d54e 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -2385,15 +2385,27 @@ static void qca_serdev_shutdown(struct device *dev)
+       struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+       struct hci_uart *hu = &qcadev->serdev_hu;
+       struct hci_dev *hdev = hu->hdev;
+-      struct qca_data *qca = hu->priv;
+       const u8 ibs_wake_cmd[] = { 0xFD };
+       const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
+ 
+       if (qcadev->btsoc_type == QCA_QCA6390) {
+-              if (test_bit(QCA_BT_OFF, &qca->flags) ||
+-                  !test_bit(HCI_RUNNING, &hdev->flags))
++              /* The purpose of sending the VSC is to reset SOC into a initial
++               * state and the state will ensure next hdev->setup() success.
++               * if HCI_QUIRK_NON_PERSISTENT_SETUP is set, it means that
++               * hdev->setup() can do its job regardless of SoC state, so
++               * don't need to send the VSC.
++               * if HCI_SETUP is set, it means that hdev->setup() was never
++               * invoked and the SOC is already in the initial state, so
++               * don't also need to send the VSC.
++               */
++              if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
++                  hci_dev_test_flag(hdev, HCI_SETUP))
+                       return;
+ 
++              /* The serdev must be in open state when conrol logic arrives
++               * here, so also fix the use-after-free issue caused by that
++               * the serdev is flushed or wrote after it is closed.
++               */
+               serdev_device_write_flush(serdev);
+               ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
+                                             sizeof(ibs_wake_cmd));
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 416f723a2dbb3..8e3eeb96db63e 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2372,7 +2372,7 @@ static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+               return -EFAULT;
+ 
+       tmp_info.media_flags = 0;
+-      if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
++      if (cdi->last_media_change_ms > tmp_info.last_media_change)
+               tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+ 
+       tmp_info.last_media_change = cdi->last_media_change_ms;
+diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+index 730c9ae5ea124..50ccd59794464 100644
+--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
+ static const struct mtk_clk_desc mfg_desc = {
+       .clks = mfg_clks,
+       .num_clks = ARRAY_SIZE(mfg_clks),
++      .need_runtime_pm = true,
+ };
+ 
+ static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
+index fa2c1b1c7dee4..9dbfc11d5c591 100644
+--- a/drivers/clk/mediatek/clk-mtk.c
++++ b/drivers/clk/mediatek/clk-mtk.c
+@@ -472,14 +472,16 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+       }
+ 
+ 
+-      devm_pm_runtime_enable(&pdev->dev);
+-      /*
+-       * Do a pm_runtime_resume_and_get() to workaround a possible
+-       * deadlock between clk_register() and the genpd framework.
+-       */
+-      r = pm_runtime_resume_and_get(&pdev->dev);
+-      if (r)
+-              return r;
++      if (mcd->need_runtime_pm) {
++              devm_pm_runtime_enable(&pdev->dev);
++              /*
++               * Do a pm_runtime_resume_and_get() to workaround a possible
++               * deadlock between clk_register() and the genpd framework.
++               */
++              r = pm_runtime_resume_and_get(&pdev->dev);
++              if (r)
++                      return r;
++      }
+ 
+       /* Calculate how many clk_hw_onecell_data entries to allocate */
+       num_clks = mcd->num_clks + mcd->num_composite_clks;
+@@ -529,6 +531,14 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+                       goto unregister_composites;
+       }
+ 
++      if (mcd->clk_notifier_func) {
++              struct clk *mfg_mux = clk_data->hws[mcd->mfg_clk_idx]->clk;
++
++              r = mcd->clk_notifier_func(&pdev->dev, mfg_mux);
++              if (r)
++                      goto unregister_clks;
++      }
++
+       r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+       if (r)
+               goto unregister_clks;
+@@ -542,7 +552,8 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+                       goto unregister_clks;
+       }
+ 
+-      pm_runtime_put(&pdev->dev);
++      if (mcd->need_runtime_pm)
++              pm_runtime_put(&pdev->dev);
+ 
+       return r;
+ 
+@@ -570,7 +581,8 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
+       if (mcd->shared_io && base)
+               iounmap(base);
+ 
+-      pm_runtime_put(&pdev->dev);
++      if (mcd->need_runtime_pm)
++              pm_runtime_put(&pdev->dev);
+       return r;
+ }
+ EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 880b3d6d80119..65c24ab6c9470 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -207,6 +207,11 @@ struct mtk_clk_desc {
+       const struct mtk_clk_rst_desc *rst_desc;
+       spinlock_t *clk_lock;
+       bool shared_io;
++
++      int (*clk_notifier_func)(struct device *dev, struct clk *clk);
++      unsigned int mfg_clk_idx;
++
++      bool need_runtime_pm;
+ };
+ 
+ int mtk_clk_simple_probe(struct platform_device *pdev);
+diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
+index cf4a7b6e0b23a..0559a33faf00e 100644
+--- a/drivers/clk/qcom/gcc-sm6350.c
++++ b/drivers/clk/qcom/gcc-sm6350.c
+@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
+               .enable_mask = BIT(6),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpll6",
+-                      .parent_hws = (const struct clk_hw*[]){
+-                              &gpll0.clkr.hw,
++                      .parent_data = &(const struct clk_parent_data){
++                              .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fixed_fabia_ops,
+@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll6_out_even",
+               .parent_hws = (const struct clk_hw*[]){
+-                      &gpll0.clkr.hw,
++                      &gpll6.clkr.hw,
+               },
+               .num_parents = 1,
+               .ops = &clk_alpha_pll_postdiv_fabia_ops,
+@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
+               .enable_mask = BIT(7),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gpll7",
+-                      .parent_hws = (const struct clk_hw*[]){
+-                              &gpll0.clkr.hw,
++                      .parent_data = &(const struct clk_parent_data){
++                              .fw_name = "bi_tcxo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_alpha_pll_fixed_fabia_ops,
+diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
+index 13bec8b2d7237..a1d41ee39816b 100644
+--- a/drivers/crypto/hisilicon/debugfs.c
++++ b/drivers/crypto/hisilicon/debugfs.c
+@@ -744,8 +744,14 @@ static void dfx_regs_uninit(struct hisi_qm *qm,
+ {
+       int i;
+ 
++      if (!dregs)
++              return;
++
+       /* Setting the pointer is NULL to prevent double free */
+       for (i = 0; i < reg_len; i++) {
++              if (!dregs[i].regs)
++                      continue;
++
+               kfree(dregs[i].regs);
+               dregs[i].regs = NULL;
+       }
+@@ -795,14 +801,21 @@ static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
+ static int qm_diff_regs_init(struct hisi_qm *qm,
+               struct dfx_diff_registers *dregs, u32 reg_len)
+ {
++      int ret;
++
+       qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-      if (IS_ERR(qm->debug.qm_diff_regs))
+-              return PTR_ERR(qm->debug.qm_diff_regs);
++      if (IS_ERR(qm->debug.qm_diff_regs)) {
++              ret = PTR_ERR(qm->debug.qm_diff_regs);
++              qm->debug.qm_diff_regs = NULL;
++              return ret;
++      }
+ 
+       qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
+       if (IS_ERR(qm->debug.acc_diff_regs)) {
+              dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
+-              return PTR_ERR(qm->debug.acc_diff_regs);
++              ret = PTR_ERR(qm->debug.acc_diff_regs);
++              qm->debug.acc_diff_regs = NULL;
++              return ret;
+       }
+ 
+       return 0;
+@@ -843,7 +856,9 @@ static int qm_last_regs_init(struct hisi_qm *qm)
+ static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
+ {
+       dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
++      qm->debug.acc_diff_regs = NULL;
+       dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
++      qm->debug.qm_diff_regs = NULL;
+ }
+ 
+ /**
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 015c95a825d31..ac2a5d2d47463 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -101,6 +101,17 @@ static void dmi_decode_table(u8 *buf,
+              (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+               const struct dmi_header *dm = (const struct dmi_header *)data;
+ 
++              /*
++               * If a short entry is found (less than 4 bytes), not only it
++               * is invalid, but we cannot reliably locate the next entry.
++               */
++              if (dm->length < sizeof(struct dmi_header)) {
++                      pr_warn(FW_BUG
++                              "Corrupted DMI table, offset %zd (only %d entries processed)\n",
++                              data - buf, i);
++                      break;
++              }
++
+               /*
+                *  We want to know the total length (formatted area and
+                *  strings) before decoding to make sure we won't run off the
+diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+index fa6193535d485..7fea4f0f495a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
++++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+@@ -100,7 +100,7 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+               adev->ip_blocks[i].status.hw = false;
+       }
+ 
+-      return r;
++      return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index b803e785d3aff..e9e0e7328c4e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -1004,7 +1004,8 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+               fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+               break;
+       default:
+-              break;
++              dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
++              return;
+       }
+ 
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index 9efbc0f7c6bdf..c3da333f09de4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -480,6 +480,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+ 
+       entry.ih = ih;
+       entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
++
++      /*
++       * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
++       * si and tonga), so initialize timestamp and timestamp_src to 0
++       */
++      entry.timestamp = 0;
++      entry.timestamp_src = 0;
++
+       amdgpu_ih_decode_iv(adev, &entry);
+ 
+       trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+index 81a6d5b94987f..1311e72486fdc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
++++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+@@ -93,7 +93,7 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+               adev->ip_blocks[i].status.hw = false;
+       }
+ 
+-      return r;
++      return 0;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index e2f80cd0ca8cb..83898e46bcadf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2146,6 +2146,9 @@ static struct audio *find_first_free_audio(
+ {
+       int i, available_audio_count;
+ 
++      if (id == ENGINE_ID_UNKNOWN)
++              return NULL;
++
+       available_audio_count = pool->audio_count;
+ 
+       for (i = 0; i < available_audio_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+index 44649db5f3e32..5646b7788f02e 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
+                                                  info->ext_id);
+       uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+ 
+-      struct timing_generator *tg =
+-                      dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
++      struct timing_generator *tg;
++
++      if (pipe_offset >= MAX_PIPES)
++              return false;
++
++      tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+ 
+       if (enable) {
+               if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index f7b5583ee609a..8e9caae7c9559 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+       uint32_t cur_size = 0;
+       uint32_t data_offset = 0;
+ 
++      if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++              return MOD_HDCP_STATUS_DDC_FAILURE;
++      }
++
+       if (is_dp_hdcp(hdcp)) {
+               while (buf_len > 0) {
+                       cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+       uint32_t cur_size = 0;
+       uint32_t data_offset = 0;
+ 
++      if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++              return MOD_HDCP_STATUS_DDC_FAILURE;
++      }
++
+       if (is_dp_hdcp(hdcp)) {
+               while (buf_len > 0) {
+                       cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index e2207f1c5bad3..6ea596a8a03df 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -701,7 +701,7 @@ struct atom_gpio_pin_lut_v2_1
+ {
+   struct  atom_common_table_header  table_header;
+  /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut  */
+-  struct  atom_gpio_pin_assignment  gpio_pin[8];
++  struct  atom_gpio_pin_assignment  gpio_pin[];
+ };
+ 
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index aa93129c3397e..426bbee2d9f5e 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -421,6 +421,13 @@ static const struct dmi_system_id orientation_data[] = {
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
++      }, {    /* Valve Steam Deck */
++              .matches = {
++                DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
++                DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"),
++                DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
++              },
++              .driver_data = (void *)&lcd800x1280_rightside_up,
+       }, {    /* VIOS LTH17 */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index 6cf46b653e810..ca3842f719842 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -324,7 +324,9 @@ int lima_gp_init(struct lima_ip *ip)
+ 
+ void lima_gp_fini(struct lima_ip *ip)
+ {
++      struct lima_device *dev = ip->dev;
+ 
++      devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_gp_pipe_init(struct lima_device *dev)
+diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
+index a1ae6c252dc2b..8ca7047adbaca 100644
+--- a/drivers/gpu/drm/lima/lima_mmu.c
++++ b/drivers/gpu/drm/lima/lima_mmu.c
+@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
+ 
+ void lima_mmu_fini(struct lima_ip *ip)
+ {
++      struct lima_device *dev = ip->dev;
++
++      if (ip->id == lima_ip_ppmmu_bcast)
++              return;
+ 
++      devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ void lima_mmu_flush_tlb(struct lima_ip *ip)
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index 54b208a4a768e..d34c9e8840f45 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -266,7 +266,9 @@ int lima_pp_init(struct lima_ip *ip)
+ 
+ void lima_pp_fini(struct lima_ip *ip)
+ {
++      struct lima_device *dev = ip->dev;
+ 
++      devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ int lima_pp_bcast_resume(struct lima_ip *ip)
+@@ -299,7 +301,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
+ 
+ void lima_pp_bcast_fini(struct lima_ip *ip)
+ {
++      struct lima_device *dev = ip->dev;
+ 
++      devm_free_irq(dev->dev, ip->irq, ip);
+ }
+ 
+ static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index d6dd79541f6a9..bdf5262ebd35e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -980,6 +980,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+               struct drm_display_mode *mode;
+ 
+               mode = drm_mode_duplicate(dev, nv_connector->native_mode);
++              if (!mode)
++                      return 0;
++
+               drm_mode_probed_add(connector, mode);
+               ret = 1;
+       }
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 1c970842624ba..208c7741bc681 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1057,7 +1057,7 @@ static const struct pci_device_id i801_ids[] = {
+ MODULE_DEVICE_TABLE(pci, i801_ids);
+ 
+ #if defined CONFIG_X86 && defined CONFIG_DMI
+-static unsigned char apanel_addr;
++static unsigned char apanel_addr __ro_after_init;
+ 
+ /* Scan the system ROM for the signature "FJKEYINF" */
+ static __init const void __iomem *bios_signature(const void __iomem *bios)
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index 50f21cdbe90d3..d2c09b0fdf527 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -15,7 +15,6 @@
+ #include <linux/ioport.h>
+ #include <linux/delay.h>
+ #include <linux/i2c.h>
+-#include <linux/timer.h>
+ #include <linux/completion.h>
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
+       int                     ret;            /* Return value */
+       int                     mode;           /* Interface mode */
+       struct completion       complete;       /* I/O completion */
+-      struct timer_list       timer;          /* Timeout */
+       u8 *                    buf;            /* Data buffer */
+       int                     len;            /* Length of data buffer */
+       int                     order;          /* RX Bytes to order via TX */
+@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
+       return (timeout <= 0);
+ }
+ 
+-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
+-{
+-      struct timer_list *timer = &alg_data->mif.timer;
+-      unsigned long expires = msecs_to_jiffies(alg_data->timeout);
+-
+-      if (expires <= 1)
+-              expires = 2;
+-
+-      del_timer_sync(timer);
+-
+-      dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
+-              jiffies, expires);
+-
+-      timer->expires = jiffies + expires;
+-
+-      add_timer(timer);
+-}
+-
+ /**
+  * i2c_pnx_start - start a device
+  * @slave_addr:               slave address
+@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+                               ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+                                 I2C_REG_CTL(alg_data));
+ 
+-                      del_timer_sync(&alg_data->mif.timer);
+-
+                       dev_dbg(&alg_data->adapter.dev,
+                               "%s(): Waking up xfer routine.\n",
+                               __func__);
+@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
+                       ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
+                         I2C_REG_CTL(alg_data));
+ 
+-              /* Stop timer. */
+-              del_timer_sync(&alg_data->mif.timer);
+               dev_dbg(&alg_data->adapter.dev,
+                       "%s(): Waking up xfer routine after zero-xfer.\n",
+                       __func__);
+@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
+                                mcntrl_drmie | mcntrl_daie);
+                       iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-                      /* Kill timer. */
+-                      del_timer_sync(&alg_data->mif.timer);
+                       complete(&alg_data->mif.complete);
+               }
+       }
+@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+                        mcntrl_drmie);
+               iowrite32(ctl, I2C_REG_CTL(alg_data));
+ 
+-              /* Stop timer, to prevent timeout. */
+-              del_timer_sync(&alg_data->mif.timer);
+               complete(&alg_data->mif.complete);
+       } else if (stat & mstatus_nai) {
+               /* Slave did not acknowledge, generate a STOP */
+@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+               /* Our return value. */
+               alg_data->mif.ret = -EIO;
+ 
+-              /* Stop timer, to prevent timeout. */
+-              del_timer_sync(&alg_data->mif.timer);
+               complete(&alg_data->mif.complete);
+       } else {
+               /*
+@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+ 
+-static void i2c_pnx_timeout(struct timer_list *t)
++static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
+ {
+-      struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
+       u32 ctl;
+ 
+       dev_err(&alg_data->adapter.dev,
+@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t)
+       iowrite32(ctl, I2C_REG_CTL(alg_data));
+       wait_reset(alg_data);
+       alg_data->mif.ret = -EIO;
+-      complete(&alg_data->mif.complete);
+ }
+ 
+ static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
+@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+       struct i2c_msg *pmsg;
+       int rc = 0, completed = 0, i;
+       struct i2c_pnx_algo_data *alg_data = adap->algo_data;
++      unsigned long time_left;
+       u32 stat;
+ 
+       dev_dbg(&alg_data->adapter.dev,
+@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+               dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
+                       __func__, alg_data->mif.mode, alg_data->mif.len);
+ 
+-              i2c_pnx_arm_timer(alg_data);
+ 
+               /* initialize the completion var */
+               init_completion(&alg_data->mif.complete);
+@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+                       break;
+ 
+               /* Wait for completion */
+-              wait_for_completion(&alg_data->mif.complete);
++              time_left = wait_for_completion_timeout(&alg_data->mif.complete,
++                                                      alg_data->timeout);
++              if (time_left == 0)
++                      i2c_pnx_timeout(alg_data);
+ 
+               if (!(rc = alg_data->mif.ret))
+                       completed++;
+@@ -657,7 +628,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+       alg_data->adapter.algo_data = alg_data;
+       alg_data->adapter.nr = pdev->id;
+ 
+-      alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
++      alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
++      if (alg_data->timeout <= 1)
++              alg_data->timeout = 2;
++
+ #ifdef CONFIG_OF
+       alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
+       if (pdev->dev.of_node) {
+@@ -677,8 +651,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
+       if (IS_ERR(alg_data->clk))
+               return PTR_ERR(alg_data->clk);
+ 
+-      timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
+-
+       snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
+                "%s", pdev->name);
+ 
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 5c284dfbe6923..66a0c5a73b832 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
+ MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
++#define MAX_UMAD_RECV_LIST_SIZE 200000
++
+ enum {
+       IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
+       IB_UMAD_MAX_AGENTS = 32,
+@@ -113,6 +115,7 @@ struct ib_umad_file {
+       struct mutex            mutex;
+       struct ib_umad_port    *port;
+       struct list_head        recv_list;
++      atomic_t                recv_list_size;
+       struct list_head        send_list;
+       struct list_head        port_list;
+       spinlock_t              send_lock;
+@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+       return file->agents_dead ? NULL : file->agent[id];
+ }
+ 
+-static int queue_packet(struct ib_umad_file *file,
+-                      struct ib_mad_agent *agent,
+-                      struct ib_umad_packet *packet)
++static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
++                      struct ib_umad_packet *packet, bool is_recv_mad)
+ {
+       int ret = 1;
+ 
+       mutex_lock(&file->mutex);
+ 
++      if (is_recv_mad &&
++          atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
++              goto unlock;
++
+       for (packet->mad.hdr.id = 0;
+            packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+            packet->mad.hdr.id++)
+               if (agent == __get_agent(file, packet->mad.hdr.id)) {
+                       list_add_tail(&packet->list, &file->recv_list);
++                      atomic_inc(&file->recv_list_size);
+                       wake_up_interruptible(&file->recv_wait);
+                       ret = 0;
+                       break;
+               }
+-
++unlock:
+       mutex_unlock(&file->mutex);
+ 
+       return ret;
+@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
+       if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
+               packet->length = IB_MGMT_MAD_HDR;
+               packet->mad.hdr.status = ETIMEDOUT;
+-              if (!queue_packet(file, agent, packet))
++              if (!queue_packet(file, agent, packet, false))
+                       return;
+       }
+       kfree(packet);
+@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
+               rdma_destroy_ah_attr(&ah_attr);
+       }
+ 
+-      if (queue_packet(file, agent, packet))
++      if (queue_packet(file, agent, packet, true))
+               goto err2;
+       return;
+ 
+@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 
+       packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+       list_del(&packet->list);
++      atomic_dec(&file->recv_list_size);
+ 
+       mutex_unlock(&file->mutex);
+ 
+@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+               /* Requeue packet */
+               mutex_lock(&file->mutex);
+               list_add(&packet->list, &file->recv_list);
++              atomic_inc(&file->recv_list_size);
+               mutex_unlock(&file->mutex);
+       } else {
+               if (packet->recv_wc)
+diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
+index 16231fe080b00..609a5f01761bd 100644
+--- a/drivers/input/ff-core.c
++++ b/drivers/input/ff-core.c
+@@ -9,8 +9,10 @@
+ /* #define DEBUG */
+ 
+ #include <linux/input.h>
++#include <linux/limits.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/overflow.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
+@@ -315,9 +317,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
+               return -EINVAL;
+       }
+ 
+-      ff_dev_size = sizeof(struct ff_device) +
+-                              max_effects * sizeof(struct file *);
+-      if (ff_dev_size < max_effects) /* overflow */
++      ff_dev_size = struct_size(ff, effect_owners, max_effects);
++      if (ff_dev_size == SIZE_MAX) /* overflow */
+               return -EINVAL;
+ 
+       ff = kzalloc(ff_dev_size, GFP_KERNEL);
+diff --git a/drivers/media/dvb-frontends/as102_fe_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
+index 297f9520ebf9d..8a4e392c88965 100644
+--- a/drivers/media/dvb-frontends/as102_fe_types.h
++++ b/drivers/media/dvb-frontends/as102_fe_types.h
+@@ -174,6 +174,6 @@ struct as10x_register_addr {
+       uint32_t addr;
+       /* register mode access */
+       uint8_t mode;
+-};
++} __packed;
+ 
+ #endif
+diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
+index f6d8a64762b99..907e786c5e10b 100644
+--- a/drivers/media/dvb-frontends/tda10048.c
++++ b/drivers/media/dvb-frontends/tda10048.c
+@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+       struct tda10048_config *config = &state->config;
+       int i;
+       u32 if_freq_khz;
++      u64 sample_freq;
+ 
+       dprintk(1, "%s(bw = %d)\n", __func__, bw);
+ 
+@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
+       dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
+ 
+       /* Calculate the sample frequency */
+-      state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
+-      state->sample_freq /= (state->pll_nfactor + 1);
+-      state->sample_freq /= (state->pll_pfactor + 4);
++      sample_freq = state->xtal_hz;
++      sample_freq *= state->pll_mfactor + 45;
++      do_div(sample_freq, state->pll_nfactor + 1);
++      do_div(sample_freq, state->pll_pfactor + 4);
++      state->sample_freq = sample_freq;
+       dprintk(1, "- sample_freq = %d\n", state->sample_freq);
+ 
+       /* Update the I/F */
+diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
+index a348344879433..fd928787207ed 100644
+--- a/drivers/media/dvb-frontends/tda18271c2dd.c
++++ b/drivers/media/dvb-frontends/tda18271c2dd.c
+@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
+ 
+       OscFreq = (u64) freq * (u64) Div;
+       OscFreq *= (u64) 16384;
+-      do_div(OscFreq, (u64)16000000);
++      do_div(OscFreq, 16000000);
+       MainDiv = OscFreq;
+ 
+       state->m_Regs[MPD] = PostDiv & 0x77;
+@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
+       OscFreq = (u64)freq * (u64)Div;
+       /* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+       OscFreq *= (u64)16384;
+-      do_div(OscFreq, (u64)16000000);
++      do_div(OscFreq, 16000000);
+       CalDiv = OscFreq;
+ 
+       state->m_Regs[CPD] = PostDiv;
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 7f8bebfa3e8e9..3d14aba7a0dea 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2419,7 +2419,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+       adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
+ 
+-      return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
++      if (!adap->fe_adap[0].fe) {
++              release_firmware(state->frontend_firmware);
++              return -ENODEV;
++      }
++
++      return 0;
+ }
+ 
+ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+@@ -2492,8 +2497,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+       dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
+       adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
+ 
+-      if (adap->fe_adap[0].fe == NULL)
++      if (!adap->fe_adap[0].fe) {
++              release_firmware(state->frontend_firmware);
+               return -ENODEV;
++      }
+ 
+       i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
+       dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
+@@ -2501,7 +2508,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
+       fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
+       dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
+ 
+-      return fe_slave == NULL ?  -ENODEV : 0;
++      if (!fe_slave) {
++              release_firmware(state->frontend_firmware);
++              return -ENODEV;
++      }
++
++      return 0;
+ }
+ 
+ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
+index 356fc728d59a8..5ff08dd04b5b3 100644
+--- a/drivers/media/usb/dvb-usb/dw2102.c
++++ b/drivers/media/usb/dvb-usb/dw2102.c
+@@ -716,6 +716,7 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ {
+       struct dvb_usb_device *d = i2c_get_adapdata(adap);
+       struct dw2102_state *state;
++      int j;
+ 
+       if (!d)
+               return -ENODEV;
+@@ -729,11 +730,11 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+               return -EAGAIN;
+       }
+ 
+-      switch (num) {
+-      case 1:
+-              switch (msg[0].addr) {
++      j = 0;
++      while (j < num) {
++              switch (msg[j].addr) {
+               case SU3000_STREAM_CTRL:
+-                      state->data[0] = msg[0].buf[0] + 0x36;
++                      state->data[0] = msg[j].buf[0] + 0x36;
+                       state->data[1] = 3;
+                       state->data[2] = 0;
+                       if (dvb_usb_generic_rw(d, state->data, 3,
+@@ -745,61 +746,86 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+                       if (dvb_usb_generic_rw(d, state->data, 1,
+                                       state->data, 2, 0) < 0)
+                               err("i2c transfer failed.");
+-                      msg[0].buf[1] = state->data[0];
+-                      msg[0].buf[0] = state->data[1];
++                      msg[j].buf[1] = state->data[0];
++                      msg[j].buf[0] = state->data[1];
+                       break;
+               default:
+-                      if (3 + msg[0].len > sizeof(state->data)) {
+-                              warn("i2c wr: len=%d is too big!\n",
+-                                   msg[0].len);
++                      /* if the current write msg is followed by a another
++                       * read msg to/from the same address
++                       */
++                      if ((j+1 < num) && (msg[j+1].flags & I2C_M_RD) &&
++                          (msg[j].addr == msg[j+1].addr)) {
++                              /* join both i2c msgs to one usb read command */
++                              if (4 + msg[j].len > sizeof(state->data)) {
++                                      warn("i2c combined wr/rd: write len=%d is too big!\n",
++                                          msg[j].len);
++                                      num = -EOPNOTSUPP;
++                                      break;
++                              }
++                              if (1 + msg[j+1].len > sizeof(state->data)) {
++                                      warn("i2c combined wr/rd: read len=%d is too big!\n",
++                                          msg[j+1].len);
++                                      num = -EOPNOTSUPP;
++                                      break;
++                              }
++
++                              state->data[0] = 0x09;
++                              state->data[1] = msg[j].len;
++                              state->data[2] = msg[j+1].len;
++                              state->data[3] = msg[j].addr;
++                              memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++                              if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
++                                      state->data, msg[j+1].len + 1, 0) < 0)
++                                      err("i2c transfer failed.");
++
++                              memcpy(msg[j+1].buf, &state->data[1], msg[j+1].len);
++                              j++;
++                              break;
++                      }
++
++                      if (msg[j].flags & I2C_M_RD) {
++                              /* single read */
++                              if (4 + msg[j].len > sizeof(state->data)) {
++                                      warn("i2c rd: len=%d is too big!\n", msg[j].len);
++                                      num = -EOPNOTSUPP;
++                                      break;
++                              }
++
++                              state->data[0] = 0x09;
++                              state->data[1] = 0;
++                              state->data[2] = msg[j].len;
++                              state->data[3] = msg[j].addr;
++                              memcpy(&state->data[4], msg[j].buf, msg[j].len);
++
++                              if (dvb_usb_generic_rw(d, state->data, 4,
++                                      state->data, msg[j].len + 1, 0) < 0)
++                                      err("i2c transfer failed.");
++
++                              memcpy(msg[j].buf, &state->data[1], msg[j].len);
++                              break;
++                      }
++
++                      /* single write */
++                      if (3 + msg[j].len > sizeof(state->data)) {
++                              warn("i2c wr: len=%d is too big!\n", msg[j].len);
+                               num = -EOPNOTSUPP;
+                               break;
+                       }
+ 
+-                      /* always i2c write*/
+                       state->data[0] = 0x08;
+-                      state->data[1] = msg[0].addr;
+-                      state->data[2] = msg[0].len;
++                      state->data[1] = msg[j].addr;
++                      state->data[2] = msg[j].len;
+ 
+-                      memcpy(&state->data[3], msg[0].buf, msg[0].len);
++                      memcpy(&state->data[3], msg[j].buf, msg[j].len);
+ 
+-                      if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
++                      if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
+                                               state->data, 1, 0) < 0)
+                               err("i2c transfer failed.");
++              } // switch
++              j++;
+ 
+-              }
+-              break;
+-      case 2:
+-              /* always i2c read */
+-              if (4 + msg[0].len > sizeof(state->data)) {
+-                      warn("i2c rd: len=%d is too big!\n",
+-                           msg[0].len);
+-                      num = -EOPNOTSUPP;
+-                      break;
+-              }
+-              if (1 + msg[1].len > sizeof(state->data)) {
+-                      warn("i2c rd: len=%d is too big!\n",
+-                           msg[1].len);
+-                      num = -EOPNOTSUPP;
+-                      break;
+-              }
+-
+-              state->data[0] = 0x09;
+-              state->data[1] = msg[0].len;
+-              state->data[2] = msg[1].len;
+-              state->data[3] = msg[0].addr;
+-              memcpy(&state->data[4], msg[0].buf, msg[0].len);
+-
+-              if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+-                                      state->data, msg[1].len + 1, 0) < 0)
+-                      err("i2c transfer failed.");
+-
+-              memcpy(msg[1].buf, &state->data[1], msg[1].len);
+-              break;
+-      default:
+-              warn("more than 2 i2c messages at a time is not handled yet.");
+-              break;
+-      }
++      } // while
+       mutex_unlock(&d->data_mutex);
+       mutex_unlock(&d->i2c_mutex);
+       return num;
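The rework above walks the i2c_msg array with an index j and, when a write message is immediately followed by a read from the same address, folds the pair into one USB command. A rough userspace sketch of that pairing step (hypothetical xfer_msg type and pack_combined() helper, the 0x09 opcode taken from the hunk; not the driver's actual API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for struct i2c_msg: address, direction, payload. */
    struct xfer_msg {
        uint8_t addr;
        int is_read;
        uint8_t *buf;
        size_t len;
    };

    /*
     * Pack a write msg followed by a read msg to the same address into one
     * command buffer: [opcode, wlen, rlen, addr, wdata...], mirroring the
     * bounds checks the hunk performs before each memcpy().
     */
    static int pack_combined(const struct xfer_msg *wr, const struct xfer_msg *rd,
                             uint8_t *cmd, size_t cmd_size)
    {
        if (4 + wr->len > cmd_size || 1 + rd->len > cmd_size)
            return -1;                  /* would overflow the command buffer */
        cmd[0] = 0x09;                  /* combined write/read opcode (per the hunk) */
        cmd[1] = (uint8_t)wr->len;
        cmd[2] = (uint8_t)rd->len;
        cmd[3] = wr->addr;
        memcpy(&cmd[4], wr->buf, wr->len);
        return (int)(wr->len + 4);      /* bytes to send */
    }

    int main(void)
    {
        uint8_t wbuf[1] = { 0x10 }, rbuf[2], cmd[64];
        struct xfer_msg wr = { 0x68, 0, wbuf, sizeof(wbuf) };
        struct xfer_msg rd = { 0x68, 1, rbuf, sizeof(rbuf) };
        int n = pack_combined(&wr, &rd, cmd, sizeof(cmd));

        printf("send %d bytes, expect %zu back\n", n, rd.len + 1);
        return 0;
    }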
+diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
+index acf18e2251a52..6c9870541c53d 100644
+--- a/drivers/media/usb/s2255/s2255drv.c
++++ b/drivers/media/usb/s2255/s2255drv.c
+@@ -247,7 +247,7 @@ struct s2255_vc {
+ struct s2255_dev {
+       struct s2255_vc         vc[MAX_CHANNELS];
+       struct v4l2_device      v4l2_dev;
+-      atomic_t                num_channels;
++      refcount_t              num_channels;
+       int                     frames;
+       struct mutex            lock;   /* channels[].vdev.lock */
+       struct mutex            cmdlock; /* protects cmdbuf */
+@@ -1550,11 +1550,11 @@ static void s2255_video_device_release(struct video_device *vdev)
+               container_of(vdev, struct s2255_vc, vdev);
+ 
+       dprintk(dev, 4, "%s, chnls: %d\n", __func__,
+-              atomic_read(&dev->num_channels));
++              refcount_read(&dev->num_channels));
+ 
+       v4l2_ctrl_handler_free(&vc->hdl);
+ 
+-      if (atomic_dec_and_test(&dev->num_channels))
++      if (refcount_dec_and_test(&dev->num_channels))
+               s2255_destroy(dev);
+       return;
+ }
+@@ -1659,7 +1659,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+                               "failed to register video device!\n");
+                       break;
+               }
+-              atomic_inc(&dev->num_channels);
++              refcount_inc(&dev->num_channels);
+               v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+                         video_device_node_name(&vc->vdev));
+ 
+@@ -1667,11 +1667,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
+       pr_info("Sensoray 2255 V4L driver Revision: %s\n",
+               S2255_VERSION);
+       /* if no channels registered, return error and probe will fail*/
+-      if (atomic_read(&dev->num_channels) == 0) {
++      if (refcount_read(&dev->num_channels) == 0) {
+               v4l2_device_unregister(&dev->v4l2_dev);
+               return ret;
+       }
+-      if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
++      if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
+               pr_warn("s2255: Not all channels available.\n");
+       return 0;
+ }
+@@ -2220,7 +2220,7 @@ static int s2255_probe(struct usb_interface *interface,
+               goto errorFWDATA1;
+       }
+ 
+-      atomic_set(&dev->num_channels, 0);
++      refcount_set(&dev->num_channels, 0);
+       dev->pid = id->idProduct;
+       dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
+       if (!dev->fw_data)
+@@ -2340,12 +2340,12 @@ static void s2255_disconnect(struct usb_interface *interface)
+ {
+       struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
+       int i;
+-      int channels = atomic_read(&dev->num_channels);
++      int channels = refcount_read(&dev->num_channels);
+       mutex_lock(&dev->lock);
+       v4l2_device_disconnect(&dev->v4l2_dev);
+       mutex_unlock(&dev->lock);
+       /*see comments in the uvc_driver.c usb disconnect function */
+-      atomic_inc(&dev->num_channels);
++      refcount_inc(&dev->num_channels);
+       /* unregister each video device. */
+       for (i = 0; i < channels; i++)
+               video_unregister_device(&dev->vc[i].vdev);
+@@ -2358,7 +2358,7 @@ static void s2255_disconnect(struct usb_interface *interface)
+               dev->vc[i].vidstatus_ready = 1;
+               wake_up(&dev->vc[i].wait_vidstatus);
+       }
+-      if (atomic_dec_and_test(&dev->num_channels))
++      if (refcount_dec_and_test(&dev->num_channels))
+               s2255_destroy(dev);
+       dev_info(&interface->dev, "%s\n", __func__);
+ }
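The s2255 hunks replace a plain atomic counter with refcount_t so that the last put tears the device down, both on normal release and on disconnect. A minimal standalone sketch of the same release-on-last-put pattern, using C11 atomics and an invented dev type rather than the kernel's refcount API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        atomic_int refs;        /* stands in for refcount_t num_channels */
    };

    static void dev_get(struct dev *d)
    {
        atomic_fetch_add(&d->refs, 1);
    }

    static void dev_put(struct dev *d)
    {
        /* destroy only when the count drops from 1 to 0 */
        if (atomic_fetch_sub(&d->refs, 1) == 1) {
            printf("last reference dropped, destroying\n");
            free(d);
        }
    }

    int main(void)
    {
        struct dev *d = malloc(sizeof(*d));

        atomic_init(&d->refs, 1);
        dev_get(d);     /* second user */
        dev_put(d);     /* one reference still held */
        dev_put(d);     /* frees here */
        return 0;
    }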
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index c3cc660399255..ea7e37a6e4c07 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -1090,28 +1090,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+                                  unsigned int offset_in_page)
+ {
+       struct mtd_info *mtd = nand_to_mtd(chip);
++      bool ident_stage = !mtd->writesize;
+ 
+-      /* Make sure the offset is less than the actual page size. */
+-      if (offset_in_page > mtd->writesize + mtd->oobsize)
+-              return -EINVAL;
++      /* Bypass all checks during NAND identification */
++      if (likely(!ident_stage)) {
++              /* Make sure the offset is less than the actual page size. */
++              if (offset_in_page > mtd->writesize + mtd->oobsize)
++                      return -EINVAL;
+ 
+-      /*
+-       * On small page NANDs, there's a dedicated command to access the OOB
+-       * area, and the column address is relative to the start of the OOB
+-       * area, not the start of the page. Asjust the address accordingly.
+-       */
+-      if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+-              offset_in_page -= mtd->writesize;
++              /*
++               * On small page NANDs, there's a dedicated command to access the OOB
++               * area, and the column address is relative to the start of the OOB
++               * area, not the start of the page. Asjust the address accordingly.
++               */
++              if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
++                      offset_in_page -= mtd->writesize;
+ 
+-      /*
+-       * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+-       * wide, then it must be divided by 2.
+-       */
+-      if (chip->options & NAND_BUSWIDTH_16) {
+-              if (WARN_ON(offset_in_page % 2))
+-                      return -EINVAL;
++              /*
++               * The offset in page is expressed in bytes, if the NAND bus is 16-bit
++               * wide, then it must be divided by 2.
++               */
++              if (chip->options & NAND_BUSWIDTH_16) {
++                      if (WARN_ON(offset_in_page % 2))
++                              return -EINVAL;
+ 
+-              offset_in_page /= 2;
++                      offset_in_page /= 2;
++              }
+       }
+ 
+       addrs[0] = offset_in_page;
+@@ -1120,7 +1124,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+        * Small page NANDs use 1 cycle for the columns, while large page NANDs
+        * need 2
+        */
+-      if (mtd->writesize <= 512)
++      if (!ident_stage && mtd->writesize <= 512)
+               return 1;
+ 
+       addrs[1] = offset_in_page >> 8;
+@@ -1316,16 +1320,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
+                              unsigned int len, bool force_8bit)
+ {
+       struct mtd_info *mtd = nand_to_mtd(chip);
++      bool ident_stage = !mtd->writesize;
+ 
+       if (len && !buf)
+               return -EINVAL;
+ 
+-      if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+-              return -EINVAL;
++      if (!ident_stage) {
++              if (offset_in_page + len > mtd->writesize + mtd->oobsize)
++                      return -EINVAL;
+ 
+-      /* Small page NANDs do not support column change. */
+-      if (mtd->writesize <= 512)
+-              return -ENOTSUPP;
++              /* Small page NANDs do not support column change. */
++              if (mtd->writesize <= 512)
++                      return -ENOTSUPP;
++      }
+ 
+       if (nand_has_exec_op(chip)) {
+               const struct nand_interface_config *conf =
+@@ -6062,6 +6069,7 @@ static const struct nand_ops rawnand_ops = {
+ static int nand_scan_tail(struct nand_chip *chip)
+ {
+       struct mtd_info *mtd = nand_to_mtd(chip);
++      struct nand_device *base = &chip->base;
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       int ret, i;
+ 
+@@ -6206,9 +6214,13 @@ static int nand_scan_tail(struct nand_chip *chip)
+       if (!ecc->write_oob_raw)
+               ecc->write_oob_raw = ecc->write_oob;
+ 
+-      /* propagate ecc info to mtd_info */
++      /* Propagate ECC info to the generic NAND and MTD layers */
+       mtd->ecc_strength = ecc->strength;
++      if (!base->ecc.ctx.conf.strength)
++              base->ecc.ctx.conf.strength = ecc->strength;
+       mtd->ecc_step_size = ecc->size;
++      if (!base->ecc.ctx.conf.step_size)
++              base->ecc.ctx.conf.step_size = ecc->size;
+ 
+       /*
+        * Set the number of read / write steps for one page depending on ECC
+@@ -6216,6 +6228,8 @@ static int nand_scan_tail(struct nand_chip *chip)
+        */
+       if (!ecc->steps)
+               ecc->steps = mtd->writesize / ecc->size;
++      if (!base->ecc.ctx.nsteps)
++              base->ecc.ctx.nsteps = ecc->steps;
+       if (ecc->steps * ecc->size != mtd->writesize) {
+               WARN(1, "Invalid ECC parameters\n");
+               ret = -EINVAL;
+diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+index c9c4e9ffcae18..d8456b849c13d 100644
+--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
++++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
+@@ -421,13 +421,13 @@ static int rk_nfc_setup_interface(struct nand_chip *chip, int target,
+       u32 rate, tc2rw, trwpw, trw2c;
+       u32 temp;
+ 
+-      if (target < 0)
+-              return 0;
+-
+       timings = nand_get_sdr_timings(conf);
+       if (IS_ERR(timings))
+               return -EOPNOTSUPP;
+ 
++      if (target < 0)
++              return 0;
++
+       if (IS_ERR(nfc->nfc_clk))
+               rate = clk_get_rate(nfc->ahb_clk);
+       else
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 5bb2c098bf4df..685fb4703ee1f 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1210,9 +1210,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
+       __be32 target;
+ 
+       if (newval->string) {
+-              if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
+-                      netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+-                                 &target);
++              if (strlen(newval->string) < 1 ||
++                  !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
++                      netdev_err(bond->dev, "invalid ARP target specified\n");
+                       return ret;
+               }
+               if (newval->string[0] == '+')
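The bonding change refuses an empty option string before skipping the leading '+'/'-' and handing the rest to the parser, so string + 1 can never point past the terminator. A small sketch of that guard, using POSIX inet_pton() in place of the kernel's in4_pton() and a hypothetical parse_target() helper:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    /* Parse "+a.b.c.d" or "-a.b.c.d"; return 0 on success, -1 on bad input. */
    static int parse_target(const char *s, struct in_addr *out)
    {
        if (strlen(s) < 1)              /* empty string: nothing after the sign */
            return -1;
        if (inet_pton(AF_INET, s + 1, out) != 1)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct in_addr a;

        printf("%d\n", parse_target("+192.0.2.1", &a));    /* 0 */
        printf("%d\n", parse_target("", &a));               /* -1, guarded */
        return 0;
    }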
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 3a2bfaad14065..5136d1e161181 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -124,6 +124,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
+ 
+ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+       .quirks = 0,
++      .family = KVASER_LEAF,
+       .ops = &kvaser_usb_leaf_dev_ops,
+ };
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index dc4ff8a6d0bf5..4938550a67c02 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+ {
+       struct mv88e6xxx_mdio_bus *mdio_bus;
+ 
+-      mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+-                                  list);
++      mdio_bus = list_first_entry_or_null(&chip->mdios,
++                                          struct mv88e6xxx_mdio_bus, list);
+       if (!mdio_bus)
+               return NULL;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index dd5945c4bfec2..375ad57fca9b4 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1262,7 +1262,7 @@ enum {
+ 
+ struct bnx2x_fw_stats_req {
+       struct stats_query_header hdr;
+-      struct stats_query_entry query[FP_SB_MAX_E1x+
++      struct stats_query_entry query[FP_SB_MAX_E2 +
+               BNX2X_FIRST_QUEUE_QUERY_IDX];
+ };
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index db8e06157da29..cbd8357c61edc 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6369,49 +6369,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+               mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+               ew32(EXTCNF_CTRL, mac_data);
+ 
+-              /* Enable the Dynamic Power Gating in the MAC */
+-              mac_data = er32(FEXTNVM7);
+-              mac_data |= BIT(22);
+-              ew32(FEXTNVM7, mac_data);
+-
+               /* Disable disconnected cable conditioning for Power Gating */
+               mac_data = er32(DPGFR);
+               mac_data |= BIT(2);
+               ew32(DPGFR, mac_data);
+ 
+-              /* Don't wake from dynamic Power Gating with clock request */
+-              mac_data = er32(FEXTNVM12);
+-              mac_data |= BIT(12);
+-              ew32(FEXTNVM12, mac_data);
+-
+-              /* Ungate PGCB clock */
+-              mac_data = er32(FEXTNVM9);
+-              mac_data &= ~BIT(28);
+-              ew32(FEXTNVM9, mac_data);
+-
+-              /* Enable K1 off to enable mPHY Power Gating */
+-              mac_data = er32(FEXTNVM6);
+-              mac_data |= BIT(31);
+-              ew32(FEXTNVM6, mac_data);
+-
+-              /* Enable mPHY power gating for any link and speed */
+-              mac_data = er32(FEXTNVM8);
+-              mac_data |= BIT(9);
+-              ew32(FEXTNVM8, mac_data);
+-
+               /* Enable the Dynamic Clock Gating in the DMA and MAC */
+               mac_data = er32(CTRL_EXT);
+               mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+               ew32(CTRL_EXT, mac_data);
+-
+-              /* No MAC DPG gating SLP_S0 in modern standby
+-               * Switch the logic of the lanphypc to use PMC counter
+-               */
+-              mac_data = er32(FEXTNVM5);
+-              mac_data |= BIT(7);
+-              ew32(FEXTNVM5, mac_data);
+       }
+ 
++      /* Enable the Dynamic Power Gating in the MAC */
++      mac_data = er32(FEXTNVM7);
++      mac_data |= BIT(22);
++      ew32(FEXTNVM7, mac_data);
++
++      /* Don't wake from dynamic Power Gating with clock request */
++      mac_data = er32(FEXTNVM12);
++      mac_data |= BIT(12);
++      ew32(FEXTNVM12, mac_data);
++
++      /* Ungate PGCB clock */
++      mac_data = er32(FEXTNVM9);
++      mac_data &= ~BIT(28);
++      ew32(FEXTNVM9, mac_data);
++
++      /* Enable K1 off to enable mPHY Power Gating */
++      mac_data = er32(FEXTNVM6);
++      mac_data |= BIT(31);
++      ew32(FEXTNVM6, mac_data);
++
++      /* Enable mPHY power gating for any link and speed */
++      mac_data = er32(FEXTNVM8);
++      mac_data |= BIT(9);
++      ew32(FEXTNVM8, mac_data);
++
++      /* No MAC DPG gating SLP_S0 in modern standby
++       * Switch the logic of the lanphypc to use PMC counter
++       */
++      mac_data = er32(FEXTNVM5);
++      mac_data |= BIT(7);
++      ew32(FEXTNVM5, mac_data);
++
+       /* Disable the time synchronization clock */
+       mac_data = er32(FEXTNVM7);
+       mac_data |= BIT(31);
+@@ -6503,33 +6503,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+       } else {
+               /* Request driver unconfigure the device from S0ix */
+ 
+-              /* Disable the Dynamic Power Gating in the MAC */
+-              mac_data = er32(FEXTNVM7);
+-              mac_data &= 0xFFBFFFFF;
+-              ew32(FEXTNVM7, mac_data);
+-
+-              /* Disable mPHY power gating for any link and speed */
+-              mac_data = er32(FEXTNVM8);
+-              mac_data &= ~BIT(9);
+-              ew32(FEXTNVM8, mac_data);
+-
+-              /* Disable K1 off */
+-              mac_data = er32(FEXTNVM6);
+-              mac_data &= ~BIT(31);
+-              ew32(FEXTNVM6, mac_data);
+-
+-              /* Disable Ungate PGCB clock */
+-              mac_data = er32(FEXTNVM9);
+-              mac_data |= BIT(28);
+-              ew32(FEXTNVM9, mac_data);
+-
+-              /* Cancel not waking from dynamic
+-               * Power Gating with clock request
+-               */
+-              mac_data = er32(FEXTNVM12);
+-              mac_data &= ~BIT(12);
+-              ew32(FEXTNVM12, mac_data);
+-
+               /* Cancel disable disconnected cable conditioning
+                * for Power Gating
+                */
+@@ -6542,13 +6515,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+               mac_data &= 0xFFF7FFFF;
+               ew32(CTRL_EXT, mac_data);
+ 
+-              /* Revert the lanphypc logic to use the internal Gbe counter
+-               * and not the PMC counter
+-               */
+-              mac_data = er32(FEXTNVM5);
+-              mac_data &= 0xFFFFFF7F;
+-              ew32(FEXTNVM5, mac_data);
+-
+               /* Enable the periodic inband message,
+                * Request PCIe clock in K1 page770_17[10:9] =01b
+                */
+@@ -6586,6 +6552,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+       mac_data &= ~BIT(31);
+       mac_data |= BIT(0);
+       ew32(FEXTNVM7, mac_data);
++
++      /* Disable the Dynamic Power Gating in the MAC */
++      mac_data = er32(FEXTNVM7);
++      mac_data &= 0xFFBFFFFF;
++      ew32(FEXTNVM7, mac_data);
++
++      /* Disable mPHY power gating for any link and speed */
++      mac_data = er32(FEXTNVM8);
++      mac_data &= ~BIT(9);
++      ew32(FEXTNVM8, mac_data);
++
++      /* Disable K1 off */
++      mac_data = er32(FEXTNVM6);
++      mac_data &= ~BIT(31);
++      ew32(FEXTNVM6, mac_data);
++
++      /* Disable Ungate PGCB clock */
++      mac_data = er32(FEXTNVM9);
++      mac_data |= BIT(28);
++      ew32(FEXTNVM9, mac_data);
++
++      /* Cancel not waking from dynamic
++       * Power Gating with clock request
++       */
++      mac_data = er32(FEXTNVM12);
++      mac_data &= ~BIT(12);
++      ew32(FEXTNVM12, mac_data);
++
++      /* Revert the lanphypc logic to use the internal Gbe counter
++       * and not the PMC counter
++       */
++      mac_data = er32(FEXTNVM5);
++      mac_data &= 0xFFFFFF7F;
++      ew32(FEXTNVM5, mac_data);
+ }
+ 
+ static int e1000e_pm_freeze(struct device *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 4c0eac83546de..385904502a6be 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5584,6 +5584,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
+               kfree(priv->htb_qos_sq_stats[i]);
+       kvfree(priv->htb_qos_sq_stats);
+ 
++      if (priv->mqprio_rl) {
++              mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
++              mlx5e_mqprio_rl_free(priv->mqprio_rl);
++      }
++
+       memset(priv, 0, sizeof(*priv));
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index db578a7e7008a..59fb31201c35e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -6,6 +6,9 @@
+ #include "helper.h"
+ #include "ofld.h"
+ 
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
++
+ static bool
+ esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
+                                const struct mlx5_vport *vport)
+@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+ {
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *flow_rule;
++      bool created = false;
+       int err = 0;
+ 
++      if (!vport->ingress.acl) {
++              err = acl_ingress_ofld_setup(esw, vport);
++              if (err)
++                      return err;
++              created = true;
++      }
++
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+       flow_act.fg = vport->ingress.offloads.drop_grp;
+       flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+       if (IS_ERR(flow_rule)) {
+               err = PTR_ERR(flow_rule);
+-              goto out;
++              goto err_out;
+       }
+ 
+       vport->ingress.offloads.drop_rule = flow_rule;
+-out:
++
++      return 0;
++err_out:
++      /* Only destroy ingress acl created in this function. */
++      if (created)
++              esw_acl_ingress_ofld_cleanup(esw, vport);
+       return err;
+ }
+ 
+@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
+       }
+ }
+ 
+-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+-                             struct mlx5_vport *vport)
++static int
++acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+ {
+       int num_ftes = 0;
+       int err;
+ 
+-      if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+-          !esw_acl_ingress_prio_tag_enabled(esw, vport))
+-              return 0;
+-
+       esw_acl_ingress_allow_rule_destroy(vport);
+ 
+       if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+@@ -347,6 +359,15 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
+       return err;
+ }
+ 
++int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
++{
++      if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
++          !esw_acl_ingress_prio_tag_enabled(esw, vport))
++              return 0;
++
++      return acl_ingress_ofld_setup(esw, vport);
++}
++
+ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
+                                 struct mlx5_vport *vport)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+index 83d2dc91ba2c8..99196333d1324 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+@@ -1484,6 +1484,7 @@ static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core,
+       vfree(types_info->data);
+ err_data_alloc:
+       kfree(types_info);
++      linecards->types_info = NULL;
+       return err;
+ }
+ 
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index 85dbe7f73e319..535dc5b2901fc 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+       skb->protocol = eth_type_trans(skb, ndev);
+       skb->ip_summed = CHECKSUM_NONE;
+ 
+-      if (__netif_rx(skb) == NET_RX_DROP) {
++      if (netif_rx(skb) == NET_RX_DROP) {
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_dropped++;
+       } else {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 546cbe21aab31..6b5bfdbec8b11 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -254,7 +254,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+       };
+       u16 ntlv;
+ 
+-      ptlv = skb_put(skb, len);
++      ptlv = skb_put_zero(skb, len);
+       memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+       ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+@@ -1532,7 +1532,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+       set_bit(MT76_HW_SCANNING, &phy->state);
+       mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-      req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
++      req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
+ 
+       req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+       req->bss_idx = mvif->idx;
+@@ -1660,7 +1660,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
+ 
+       mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
+ 
+-      req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
++      req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
+       req->version = 1;
+       req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
+ 
+@@ -2294,7 +2294,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
+               return -ENOMEM;
+ 
+       skb_put_data(skb, &hdr, sizeof(hdr));
+-      gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
++      gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
+                                                        sizeof(*gtk_tlv));
+       gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
+       gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
+@@ -2417,7 +2417,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
+               return -ENOMEM;
+ 
+       skb_put_data(skb, &hdr, sizeof(hdr));
+-      ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
++      ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
+       ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
+       ptlv->len = cpu_to_le16(sizeof(*ptlv));
+       ptlv->data_len = pattern->pattern_len;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index b2ea539f697f7..65f07cc2acdd4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -395,7 +395,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+               .len = cpu_to_le16(sub_len),
+       };
+ 
+-      ptlv = skb_put(skb, sub_len);
++      ptlv = skb_put_zero(skb, sub_len);
+       memcpy(ptlv, &tlv, sizeof(tlv));
+ 
+       le16_add_cpu(sub_ntlv, 1);
+diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
+index 5eb02902e875a..13853fda3e047 100644
+--- a/drivers/net/wireless/microchip/wilc1000/hif.c
++++ b/drivers/net/wireless/microchip/wilc1000/hif.c
+@@ -379,7 +379,8 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+       struct ieee80211_p2p_noa_attr noa_attr;
+       const struct cfg80211_bss_ies *ies;
+       struct wilc_join_bss_param *param;
+-      u8 rates_len = 0, ies_len;
++      u8 rates_len = 0;
++      int ies_len;
+       int ret;
+ 
+       param = kzalloc(sizeof(*param), GFP_KERNEL);
+diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
+index 85c06dbb2c449..9fffd4421ad5b 100644
+--- a/drivers/nfc/virtual_ncidev.c
++++ b/drivers/nfc/virtual_ncidev.c
+@@ -121,6 +121,10 @@ static ssize_t virtual_ncidev_write(struct file *file,
+               kfree_skb(skb);
+               return -EFAULT;
+       }
++      if (strnlen(skb->data, count) != count) {
++              kfree_skb(skb);
++              return -EINVAL;
++      }
+ 
+       nci_recv_frame(ndev, skb);
+       return count;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 6cf0ce7aff678..d0154859421db 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -559,7 +559,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+               int node, srcu_idx;
+ 
+               srcu_idx = srcu_read_lock(&head->srcu);
+-              for_each_node(node)
++              for_each_online_node(node)
+                       __nvme_find_path(head, node);
+               srcu_read_unlock(&head->srcu, srcu_idx);
+       }
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 5ff09f2cacab7..32e89ea853a47 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -824,7 +824,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+               struct bio_vec bv = req_bvec(req);
+ 
+               if (!is_pci_p2pdma_page(bv.bv_page)) {
+-                      if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
++                      if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
++                           bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+                               return nvme_setup_prp_simple(dev, req,
+                                                            &cmnd->rw, &bv);
+ 
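The NVMe PCI change bases the two-PRP fast-path test on the offset within a controller page rather than the raw bvec offset, since what matters for PRP setup is where the data starts inside a controller page. A tiny sketch of the masked check, assuming a 4 KiB controller page:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CTRL_PAGE_SIZE 4096u    /* assumed NVME_CTRL_PAGE_SIZE */

    /* True if a buffer at (offset, len) spans at most two controller pages. */
    static bool fits_two_prps(size_t offset, size_t len)
    {
        return (offset & (CTRL_PAGE_SIZE - 1)) + len <= 2 * CTRL_PAGE_SIZE;
    }

    int main(void)
    {
        /* starts on a controller-page boundary: two PRPs are enough */
        printf("%d\n", fits_two_prps(3 * CTRL_PAGE_SIZE, 2 * CTRL_PAGE_SIZE));
        /* starts 512 bytes into a page: the same length now needs three pages */
        printf("%d\n", fits_two_prps(3 * CTRL_PAGE_SIZE + 512, 2 * CTRL_PAGE_SIZE));
        return 0;
    }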
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 7b74926c50f9b..d2954406b2297 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -805,6 +805,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
+       percpu_ref_exit(&sq->ref);
+       nvmet_auth_sq_free(sq);
+ 
++      /*
++       * we must reference the ctrl again after waiting for inflight IO
++       * to complete. Because admin connect may have sneaked in after we
++       * store sq->ctrl locally, but before we killed the percpu_ref. the
++       * admin connect allocates and assigns sq->ctrl, which now needs a
++       * final ref put, as this ctrl is going away.
++       */
++      ctrl = sq->ctrl;
++
+       if (ctrl) {
+               /*
+                * The teardown flow may take some time, and the host may not
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index f10994b94a33a..1a8cb8eb22829 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -3277,7 +3277,7 @@ static const char *find_hci_method(acpi_handle handle)
+  */
+ #define QUIRK_HCI_HOTKEY_QUICKSTART           BIT(1)
+ 
+-static const struct dmi_system_id toshiba_dmi_quirks[] = {
++static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = {
+       {
+        /* Toshiba Portégé R700 */
+        /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+@@ -3312,8 +3312,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+       struct toshiba_acpi_dev *dev;
+       const char *hci_method;
+       u32 dummy;
+-      const struct dmi_system_id *dmi_id;
+-      long quirks = 0;
+       int ret = 0;
+ 
+       if (toshiba_acpi)
+@@ -3466,16 +3464,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+       }
+ #endif
+ 
+-      dmi_id = dmi_first_match(toshiba_dmi_quirks);
+-      if (dmi_id)
+-              quirks = (long)dmi_id->driver_data;
+-
+-      if (turn_on_panel_on_resume == -1)
+-              turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+-
+-      if (hci_hotkey_quickstart == -1)
+-              hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
+-
+       toshiba_wwan_available(dev);
+       if (dev->wwan_supported)
+               toshiba_acpi_setup_wwan_rfkill(dev);
+@@ -3624,10 +3612,27 @@ static struct acpi_driver toshiba_acpi_driver = {
+       .drv.pm = &toshiba_acpi_pm,
+ };
+ 
++static void __init toshiba_dmi_init(void)
++{
++      const struct dmi_system_id *dmi_id;
++      long quirks = 0;
++
++      dmi_id = dmi_first_match(toshiba_dmi_quirks);
++      if (dmi_id)
++              quirks = (long)dmi_id->driver_data;
++
++      if (turn_on_panel_on_resume == -1)
++              turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
++
++      if (hci_hotkey_quickstart == -1)
++              hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
++}
++
+ static int __init toshiba_acpi_init(void)
+ {
+       int ret;
+ 
++      toshiba_dmi_init();
+       toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
+       if (!toshiba_proc_dir) {
+               pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 399b97b54dd0f..029355a2f389d 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -871,6 +871,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
+       .properties     = schneider_sct101ctm_props,
+ };
+ 
++static const struct property_entry globalspace_solt_ivw116_props[] = {
++      PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
++      PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
++      PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
++      PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
++      PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
++      PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++      PROPERTY_ENTRY_BOOL("silead,home-button"),
++      { }
++};
++
++static const struct ts_dmi_data globalspace_solt_ivw116_data = {
++      .acpi_name      = "MSSL1680:00",
++      .properties     = globalspace_solt_ivw116_props,
++};
++
+ static const struct property_entry techbite_arc_11_6_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
+@@ -1345,6 +1361,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+                       DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
+               },
+       },
++      {
++              /* Jumper EZpad 6s Pro */
++              .driver_data = (void *)&jumper_ezpad_6_pro_b_data,
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
++                      /* Above matches are too generic, add bios match */
++                      DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
++                      DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
++              },
++      },
+       {
+               /* Jumper EZpad 6 m4 */
+               .driver_data = (void *)&jumper_ezpad_6_m4_data,
+@@ -1584,6 +1611,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+               },
+       },
++      {
++              /* GlobalSpace SoLT IVW 11.6" */
++              .driver_data = (void *)&globalspace_solt_ivw116_data,
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
++                      DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
++              },
++      },
+       {
+               /* Techbite Arc 11.6 */
+               .driver_data = (void *)&techbite_arc_11_6_data,
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index df0f19e6d9235..17885c9f55cb2 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1191,7 +1191,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+               if (rc)
+                       break;
+               if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+-                      return -EFAULT;
++                      rc = -EFAULT;
+               memzero_explicit(&kcs, sizeof(kcs));
+               break;
+       }
+@@ -1223,7 +1223,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+               if (rc)
+                       break;
+               if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+-                      return -EFAULT;
++                      rc = -EFAULT;
+               memzero_explicit(&kcp, sizeof(kcp));
+               break;
+       }
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 5748bd9369ff7..f5001fadd5b12 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1355,11 +1355,21 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+       mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+           mr_sas_port->remote_identify.sas_address, hba_port);
+ 
++      if (mr_sas_node->num_phys > sizeof(mr_sas_port->phy_mask) * 8)
++              ioc_info(mrioc, "max port count %u could be too high\n",
++                  mr_sas_node->num_phys);
++
+       for (i = 0; i < mr_sas_node->num_phys; i++) {
+               if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+                   mr_sas_port->remote_identify.sas_address) ||
+                   (mr_sas_node->phy[i].hba_port != hba_port))
+                       continue;
++
++              if (i > sizeof(mr_sas_port->phy_mask) * 8) {
++                      ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n",
++                          i, sizeof(mr_sas_port->phy_mask) * 8);
++                      goto out_fail;
++              }
+               list_add_tail(&mr_sas_node->phy[i].port_siblings,
+                   &mr_sas_port->phy_list);
+               mr_sas_port->num_phys++;
+diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
+index 10fe3383855c0..031e605b3f427 100644
+--- a/drivers/scsi/qedf/qedf_io.c
++++ b/drivers/scsi/qedf/qedf_io.c
+@@ -2331,9 +2331,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+       io_req->fcport = fcport;
+       io_req->cmd_type = QEDF_TASK_MGMT_CMD;
+ 
+-      /* Record which cpu this request is associated with */
+-      io_req->cpu = smp_processor_id();
+-
+       /* Set TM flags */
+       io_req->io_req_flags = QEDF_READ;
+       io_req->data_xfer_len = 0;
+@@ -2355,6 +2352,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+ 
+       spin_lock_irqsave(&fcport->rport_lock, flags);
+ 
++      /* Record which cpu this request is associated with */
++      io_req->cpu = smp_processor_id();
++
+       sqe_idx = qedf_get_sqe_idx(fcport);
+       sqe = &fcport->sq[sqe_idx];
+       memset(sqe, 0, sizeof(struct fcoe_wqe));
+diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
+index d28b8bd5b70bc..b8bed8f39d2ae 100644
+--- a/drivers/spi/spi-cadence-xspi.c
++++ b/drivers/spi/spi-cadence-xspi.c
+@@ -146,6 +146,9 @@
+ #define CDNS_XSPI_STIG_DONE_FLAG              BIT(0)
+ #define CDNS_XSPI_TRD_STATUS                  0x0104
+ 
++#define MODE_NO_OF_BYTES                      GENMASK(25, 24)
++#define MODEBYTES_COUNT                       1
++
+ /* Helper macros for filling command registers */
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+       FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+@@ -158,9 +161,10 @@
+       FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+       FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+ 
+-#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, modebytes) ( \
+       FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+       FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
++      FIELD_PREP(MODE_NO_OF_BYTES, modebytes) | \
+       FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+ 
+ #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+@@ -174,12 +178,12 @@
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+ 
+-#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
++#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes) ( \
+       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+               ((op)->data.nbytes >> 16) & 0xffff) | \
+       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+                 (op)->dummy.buswidth != 0 ? \
+-                (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
++                (((dummybytes) * 8) / (op)->dummy.buswidth) : \
+                 0))
+ 
+ #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+@@ -352,6 +356,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+       u32 cmd_regs[6];
+       u32 cmd_status;
+       int ret;
++      int dummybytes = op->dummy.nbytes;
+ 
+       ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+       if (ret < 0)
+@@ -366,7 +371,12 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+       memset(cmd_regs, 0, sizeof(cmd_regs));
+       cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+       cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+-      cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
++      if (dummybytes != 0) {
++              cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 1);
++              dummybytes--;
++      } else {
++              cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op, 0);
++      }
+       cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+                                                      cdns_xspi->cur_cs);
+ 
+@@ -376,7 +386,7 @@ static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+               cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+               cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+               cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+-              cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
++              cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op, dummybytes);
+               cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+                                                          cdns_xspi->cur_cs);
+ 
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 40e59e72d5e9e..5acbab0512b82 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1303,7 +1303,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
+ 
+ }
+ 
+-#define TXTL_DEFAULT 2 /* reset default */
++#define TXTL_DEFAULT 8
+ #define RXTL_DEFAULT 8 /* 8 characters or aging timer */
+ #define TXTL_DMA 8 /* DMA burst setting */
+ #define RXTL_DMA 9 /* DMA burst setting */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7549c430c4f01..be5b0ff2966fe 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2631,16 +2631,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+                       else
+                               xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+                                                           EP_SOFT_RESET);
+-                      goto cleanup;
++                      break;
+               case COMP_RING_UNDERRUN:
+               case COMP_RING_OVERRUN:
+               case COMP_STOPPED_LENGTH_INVALID:
+-                      goto cleanup;
++                      break;
+               default:
+                       xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+                                slot_id, ep_index);
+                       goto err_out;
+               }
++              return 0;
+       }
+ 
+       /* Count current td numbers if ep->skip is set */
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 77f24168c7ed2..676978f2e9944 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1720,8 +1720,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ next:
+               if (ret) {
+                       /* Refcount held by the reclaim_bgs list after splice. */
+-                      btrfs_get_block_group(bg);
+-                      list_add_tail(&bg->bg_list, &retry_list);
++                      spin_lock(&fs_info->unused_bgs_lock);
++                      /*
++                       * This block group might be added to the unused list
++                       * during the above process. Move it back to the
++                       * reclaim list otherwise.
++                       */
++                      if (list_empty(&bg->bg_list)) {
++                              btrfs_get_block_group(bg);
++                              list_add_tail(&bg->bg_list, &retry_list);
++                      }
++                      spin_unlock(&fs_info->unused_bgs_lock);
+               }
+               btrfs_put_block_group(bg);
+ 
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 12a2b1e3f1e35..f48895a9b165e 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3407,7 +3407,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
+       const u32 max_length = SZ_64K;
+       struct btrfs_path path = { 0 };
+       u64 cur_logical = logical_start;
+-      int ret;
++      int ret = 0;
+ 
+       /* The range must be inside the bg */
+       ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index b54d681c6457d..a02c748753161 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -68,7 +68,7 @@ enum {
+ 
+ struct f2fs_fault_info {
+       atomic_t inject_ops;
+-      unsigned int inject_rate;
++      int inject_rate;
+       unsigned int inject_type;
+ };
+ 
+@@ -4530,10 +4530,14 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+ }
+ 
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-                                                      unsigned int type);
++extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++                                                      unsigned long type);
+ #else
+-#define f2fs_build_fault_attr(sbi, rate, type)                do { } while (0)
++static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
++                                      unsigned long rate, unsigned long type)
++{
++      return 0;
++}
+ #endif
+ 
+ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index f496622921843..6bd8c231069ad 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -63,21 +63,31 @@ const char *f2fs_fault_name[FAULT_MAX] = {
+       [FAULT_LOCK_OP]         = "lock_op",
+ };
+ 
+-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+-                                                      unsigned int type)
++int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
++                                                      unsigned long type)
+ {
+       struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
+ 
+       if (rate) {
++              if (rate > INT_MAX)
++                      return -EINVAL;
+               atomic_set(&ffi->inject_ops, 0);
+-              ffi->inject_rate = rate;
++              ffi->inject_rate = (int)rate;
+       }
+ 
+-      if (type)
+-              ffi->inject_type = type;
++      if (type) {
++              if (type >= BIT(FAULT_MAX))
++                      return -EINVAL;
++              ffi->inject_type = (unsigned int)type;
++      }
+ 
+       if (!rate && !type)
+               memset(ffi, 0, sizeof(struct f2fs_fault_info));
++      else
++              f2fs_info(sbi,
++                      "build fault injection attr: rate: %lu, type: 0x%lx",
++                                                              rate, type);
++      return 0;
+ }
+ #endif
+ 
+@@ -916,14 +926,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
+               case Opt_fault_injection:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+-                      f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
++                      if (f2fs_build_fault_attr(sbi, arg,
++                                      F2FS_ALL_FAULT_TYPE))
++                              return -EINVAL;
+                       set_opt(sbi, FAULT_INJECTION);
+                       break;
+ 
+               case Opt_fault_type:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+-                      f2fs_build_fault_attr(sbi, 0, arg);
++                      if (f2fs_build_fault_attr(sbi, 0, arg))
++                              return -EINVAL;
+                       set_opt(sbi, FAULT_INJECTION);
+                       break;
+ #else
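f2fs_build_fault_attr() now validates its inputs and returns -EINVAL instead of silently truncating: the rate has to fit in an int and the type mask has to stay below BIT(FAULT_MAX). A simplified sketch of that validate-then-apply shape, with an assumed FAULT_MAX value and without the clear-on-zero behaviour of the real helper:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    #define FAULT_MAX 24            /* assumed number of fault types */

    struct fault_info {
        int rate;
        unsigned int type;
    };

    /* Reject out-of-range values before touching the live configuration. */
    static int build_fault_attr(struct fault_info *fi, unsigned long rate,
                                unsigned long type)
    {
        if (rate > INT_MAX)
            return -EINVAL;
        if (type >= (1ul << FAULT_MAX))
            return -EINVAL;
        if (rate)
            fi->rate = (int)rate;
        if (type)
            fi->type = (unsigned int)type;
        return 0;
    }

    int main(void)
    {
        struct fault_info fi = { 0, 0 };

        printf("%d\n", build_fault_attr(&fi, 100, 0x3));                    /* 0 */
        printf("%d\n", build_fault_attr(&fi, (unsigned long)INT_MAX + 1, 0)); /* -EINVAL */
        return 0;
    }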
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 751a108e612ff..06d5791afe90e 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -451,10 +451,16 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
+       if (ret < 0)
+               return ret;
+ #ifdef CONFIG_F2FS_FAULT_INJECTION
+-      if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
+-              return -EINVAL;
+-      if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
+-              return -EINVAL;
++      if (a->struct_type == FAULT_INFO_TYPE) {
++              if (f2fs_build_fault_attr(sbi, 0, t))
++                      return -EINVAL;
++              return count;
++      }
++      if (a->struct_type == FAULT_INFO_RATE) {
++              if (f2fs_build_fault_attr(sbi, t, 0))
++                      return -EINVAL;
++              return count;
++      }
+ #endif
+       if (a->struct_type == RESERVED_BLOCKS) {
+               spin_lock(&sbi->stat_lock);
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 7ea37f49f1e18..e71f4c94c4483 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
+       struct jffs2_inode_info *f = foo;
+ 
+       mutex_init(&f->sem);
++      f->target = NULL;
+       inode_init_once(&f->vfs_inode);
+ }
+ 
+diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
+index 7342de296ec3c..25881bdd212b8 100644
+--- a/fs/nilfs2/alloc.c
++++ b/fs/nilfs2/alloc.c
+@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
+  * @target: offset number of an entry in the group (start point)
+  * @bsize: size in bits
+  * @lock: spin lock protecting @bitmap
++ * @wrap: whether to wrap around
+  */
+ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+                                           unsigned long target,
+                                           unsigned int bsize,
+-                                          spinlock_t *lock)
++                                          spinlock_t *lock, bool wrap)
+ {
+       int pos, end = bsize;
+ 
+@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
+ 
+               end = target;
+       }
++      if (!wrap)
++              return -ENOSPC;
+ 
+       /* wrap around */
+       for (pos = 0; pos < end; pos++) {
+@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
+  * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
+  * @inode: inode of metadata file using this allocator
+  * @req: nilfs_palloc_req structure exchanged for the allocation
++ * @wrap: whether to wrap around
+  */
+ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+-                                   struct nilfs_palloc_req *req)
++                                   struct nilfs_palloc_req *req, bool wrap)
+ {
+       struct buffer_head *desc_bh, *bitmap_bh;
+       struct nilfs_palloc_group_desc *desc;
+@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+       entries_per_group = nilfs_palloc_entries_per_group(inode);
+ 
+       for (i = 0; i < ngroups; i += n) {
+-              if (group >= ngroups) {
++              if (group >= ngroups && wrap) {
+                       /* wrap around */
+                       group = 0;
+                       maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
+@@ -541,7 +545,13 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+                               bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
+                               pos = nilfs_palloc_find_available_slot(
+                                       bitmap, group_offset,
+-                                      entries_per_group, lock);
++                                      entries_per_group, lock, wrap);
++                              /*
++                               * Since the search for a free slot in the
++                               * second and subsequent bitmap blocks always
++                               * starts from the beginning, the wrap flag
++                               * only has an effect on the first search.
++                               */
+                               if (pos >= 0) {
+                                       /* found a free entry */
+                                       nilfs_palloc_group_desc_add_entries(
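The new wrap parameter lets callers decide whether the free-slot scan may circle back to the start of the bitmap, which is what lets the ifile allocator start at NILFS_FIRST_INO without wandering back into the reserved range. A standalone sketch of a first-clear-bit search with an optional wrap-around pass (byte-array bitmap, -1 standing in for -ENOSPC):

    #include <limits.h>
    #include <stdio.h>

    /* Test bit `pos` in a byte-array bitmap, LSB first within each byte. */
    static int test_bit(const unsigned char *bitmap, unsigned int pos)
    {
        return (bitmap[pos / CHAR_BIT] >> (pos % CHAR_BIT)) & 1;
    }

    /*
     * Return the first clear bit at or after `start`, optionally wrapping to
     * the beginning; -1 when nothing is free.
     */
    static int find_free_slot(const unsigned char *bitmap, unsigned int start,
                              unsigned int nbits, int wrap)
    {
        unsigned int pos;

        for (pos = start; pos < nbits; pos++)
            if (!test_bit(bitmap, pos))
                return (int)pos;
        if (!wrap)
            return -1;
        for (pos = 0; pos < start; pos++)
            if (!test_bit(bitmap, pos))
                return (int)pos;
        return -1;
    }

    int main(void)
    {
        unsigned char map[2] = { 0xfb, 0xff };  /* only bit 2 is clear */

        printf("%d\n", find_free_slot(map, 8, 16, 0));  /* -1: no wrap allowed */
        printf("%d\n", find_free_slot(map, 8, 16, 1));  /* 2: found after wrapping */
        return 0;
    }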
+diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
+index b667e869ac076..d825a9faca6d9 100644
+--- a/fs/nilfs2/alloc.h
++++ b/fs/nilfs2/alloc.h
+@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
+       struct buffer_head *pr_entry_bh;
+ };
+ 
+-int nilfs_palloc_prepare_alloc_entry(struct inode *,
+-                                   struct nilfs_palloc_req *);
++int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
++                                   struct nilfs_palloc_req *req, bool wrap);
+ void nilfs_palloc_commit_alloc_entry(struct inode *,
+                                    struct nilfs_palloc_req *);
+ void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);
+diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
+index 1e7f653c1df7e..242cc36bf1e97 100644
+--- a/fs/nilfs2/dat.c
++++ b/fs/nilfs2/dat.c
+@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
+ {
+       int ret;
+ 
+-      ret = nilfs_palloc_prepare_alloc_entry(dat, req);
++      ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
+       if (ret < 0)
+               return ret;
+ 
+diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
+index e9668e455a35e..4bba1970ad333 100644
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -143,6 +143,9 @@ static bool nilfs_check_page(struct page *page)
+                       goto Enamelen;
+               if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+                       goto Espan;
++              if (unlikely(p->inode &&
++                           NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
++                      goto Einumber;
+       }
+       if (offs != limit)
+               goto Eend;
+@@ -168,6 +171,9 @@ static bool nilfs_check_page(struct page *page)
+       goto bad_entry;
+ Espan:
+       error = "directory entry across blocks";
++      goto bad_entry;
++Einumber:
++      error = "disallowed inode number";
+ bad_entry:
+       nilfs_error(sb,
+                   "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
+index a8a4bc8490b4d..ac10a62a41e98 100644
+--- a/fs/nilfs2/ifile.c
++++ b/fs/nilfs2/ifile.c
+@@ -55,13 +55,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
+       struct nilfs_palloc_req req;
+       int ret;
+ 
+-      req.pr_entry_nr = 0;  /*
+-                             * 0 says find free inode from beginning
+-                             * of a group. dull code!!
+-                             */
++      req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
+       req.pr_entry_bh = NULL;
+ 
+-      ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
++      ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
+       if (!ret) {
+               ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
+                                                  &req.pr_entry_bh);
+diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
+index aecda4fc95f5f..a1ff52265e1b0 100644
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -116,9 +116,15 @@ enum {
+ #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
+ 
+ #define NILFS_MDT_INODE(sb, ino) \
+-      ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
++      ((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
+ #define NILFS_VALID_INODE(sb, ino) \
+-      ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
++      ((ino) >= NILFS_FIRST_INO(sb) ||                                \
++       ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
++
++#define NILFS_PRIVATE_INODE(ino) ({                                   \
++      ino_t __ino = (ino);                                            \
++      ((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO &&       \
++       (__ino) != NILFS_SKETCH_INO); })
+ 
+ /**
+  * struct nilfs_transaction_info: context information for synchronization
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 71400496ed365..be41e26b78246 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+       }
+ 
+       nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
++      if (nilfs->ns_first_ino < NILFS_USER_INO) {
++              nilfs_err(nilfs->ns_sb,
++                        "too small lower limit for non-reserved inode numbers: %u",
++                        nilfs->ns_first_ino);
++              return -EINVAL;
++      }
+ 
+       nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
+       if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index cd4ae1b8ae165..17fee562ee503 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -182,7 +182,7 @@ struct the_nilfs {
+       unsigned long           ns_nrsvsegs;
+       unsigned long           ns_first_data_block;
+       int                     ns_inode_size;
+-      int                     ns_first_ino;
++      unsigned int            ns_first_ino;
+       u32                     ns_crc_seed;
+ 
+       /* /sys/fs/<nilfs>/<device> */
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index d98cf7b382bcc..2e4eea854bda5 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -217,8 +217,11 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+               if (!ea->name_len)
+                       break;
+ 
+-              if (ea->name_len > ea_size)
++              if (ea->name_len > ea_size) {
++                      ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++                      err = -EINVAL; /* corrupted fs */
+                       break;
++              }
+ 
+               if (buffer) {
+                       /* Check if we can use field ea->name */
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 4ca8ed410c3cf..24e028c119c1b 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -201,7 +201,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+                    (long)new_op->downcall.resp.statfs.files_avail);
+ 
+       buf->f_type = sb->s_magic;
+-      memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
++      buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
++      buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
+       buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+       buf->f_namelen = ORANGEFS_NAME_MAX;
+ 
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index bb8467cd11ae2..34f242105be23 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -93,7 +93,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
+ {
+       const struct path *path = &file->f_path;
+ 
+-      if (file->f_mode & FMODE_NONOTIFY)
++      /*
++       * FMODE_NONOTIFY are fds generated by fanotify itself which should not
++       * generate new events. We also don't want to generate events for
++       * FMODE_PATH fds (involves open & close events) as they are just
++       * handle creation / destruction events and not "real" file events.
++       */
++      if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
+               return 0;
+ 
+       return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
+index 6239a378c0ea8..0b4f7289951a4 100644
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -382,7 +382,7 @@ LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
+ 
+ #ifdef CONFIG_AUDIT
+ LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+-       void **lsmrule)
++       void **lsmrule, gfp_t gfp)
+ LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+ LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index a33aa9eb9fc3b..5b5630e58407a 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -21,6 +21,8 @@
+ #include <linux/debug_locks.h>
+ #include <linux/cleanup.h>
+ 
++struct device;
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __DEP_MAP_MUTEX_INITIALIZER(lockname)                        \
+               , .dep_map = {                                  \
+@@ -171,6 +173,31 @@ do {                                                      \
+ } while (0)
+ #endif /* CONFIG_PREEMPT_RT */
+ 
++#ifdef CONFIG_DEBUG_MUTEXES
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock);
++
++#else
++
++static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++      /*
++       * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so
++       * no really need to register it in the devm subsystem.
++       */
++      return 0;
++}
++
++#endif
++
++#define devm_mutex_init(dev, mutex)                   \
++({                                                    \
++      typeof(mutex) mutex_ = (mutex);                 \
++                                                      \
++      mutex_init(mutex_);                             \
++      __devm_mutex_init(dev, mutex_);                 \
++})
++
+ /*
+  * See kernel/locking/mutex.c for detailed documentation of these APIs.
+  * Also see Documentation/locking/mutex-design.rst.
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 2772f6375f140..c33c95f409eb6 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -1921,7 +1921,8 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
+ 
+ #ifdef CONFIG_AUDIT
+ #ifdef CONFIG_SECURITY
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++                           gfp_t gfp);
+ int security_audit_rule_known(struct audit_krule *krule);
+ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+ void security_audit_rule_free(void *lsmrule);
+@@ -1929,7 +1930,7 @@ void security_audit_rule_free(void *lsmrule);
+ #else
+ 
+ static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
+-                                         void **lsmrule)
++                                         void **lsmrule, gfp_t gfp)
+ {
+       return 0;
+ }
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index 42d99896e7a6e..6c9a4d322309f 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+                       entry->rule.buflen += f_val;
+                       f->lsm_str = str;
+                       err = security_audit_rule_init(f->type, f->op, str,
+-                                                     (void **)&f->lsm_rule);
++                                                     (void **)&f->lsm_rule,
++                                                     GFP_KERNEL);
+                       /* Keep currently invalid fields around in case they
+                        * become valid after a policy reload. */
+                       if (err == -EINVAL) {
+@@ -798,7 +799,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
+ 
+       /* our own (refreshed) copy of lsm_rule */
+       ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
+-                                     (void **)&df->lsm_rule);
++                                     (void **)&df->lsm_rule, GFP_KERNEL);
+       /* Keep currently invalid fields around in case they
+        * become valid after a policy reload. */
+       if (ret == -EINVAL) {
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index af661734e8f90..dafdc47ae5fcc 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -252,6 +252,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+                * dma_mask changed by benchmark
+                */
+               dma_set_mask(map->dev, old_dma_mask);
++
++              if (ret)
++                      return ret;
+               break;
+       default:
+               return -EINVAL;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index bccfa4218356e..156283b3c1bf6 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -481,6 +481,8 @@ void mm_update_next_owner(struct mm_struct *mm)
+        * Search through everything else, we should not get here often.
+        */
+       for_each_process(g) {
++              if (atomic_read(&mm->mm_users) <= 1)
++                      break;
+               if (g->flags & PF_KTHREAD)
+                       continue;
+               for_each_thread(g, c) {
+diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
+index bc8abb8549d20..6e6f6071cfa27 100644
+--- a/kernel/locking/mutex-debug.c
++++ b/kernel/locking/mutex-debug.c
+@@ -12,6 +12,7 @@
+  */
+ #include <linux/mutex.h>
+ #include <linux/delay.h>
++#include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/poison.h>
+ #include <linux/sched.h>
+@@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name,
+       lock->magic = lock;
+ }
+ 
++static void devm_mutex_release(void *res)
++{
++      mutex_destroy(res);
++}
++
++int __devm_mutex_init(struct device *dev, struct mutex *lock)
++{
++      return devm_add_action_or_reset(dev, devm_mutex_release, lock);
++}
++EXPORT_SYMBOL_GPL(__devm_mutex_init);
++
+ /***
+  * mutex_destroy - mark a mutex unusable
+  * @lock: the mutex to be destroyed
+diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
+index d9d1df28cc52e..9c9e4dcf06d96 100644
+--- a/lib/kunit/try-catch.c
++++ b/lib/kunit/try-catch.c
+@@ -78,7 +78,6 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+       time_remaining = wait_for_completion_timeout(&try_completion,
+                                                    kunit_test_timeout());
+       if (time_remaining == 0) {
+-              kunit_err(test, "try timed out\n");
+               try_catch->try_result = -ETIMEDOUT;
+               kthread_stop(task_struct);
+       }
+@@ -93,6 +92,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+               try_catch->try_result = 0;
+       else if (exit_code == -EINTR)
+               kunit_err(test, "wake_up_process() was never called\n");
++      else if (exit_code == -ETIMEDOUT)
++              kunit_err(test, "try timed out\n");
+       else if (exit_code)
+               kunit_err(test, "Unknown error: %d\n", exit_code);
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index d3e9d12860b9f..7dbac6ede7242 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -414,13 +414,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
+       else
+               bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
+ 
+-      if (bg_thresh >= thresh)
+-              bg_thresh = thresh / 2;
+       tsk = current;
+       if (rt_task(tsk)) {
+               bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
+               thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
+       }
++      /*
++       * Dirty throttling logic assumes the limits in page units fit into
++       * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++       */
++      if (thresh > UINT_MAX)
++              thresh = UINT_MAX;
++      /* This makes sure bg_thresh is within 32-bits as well */
++      if (bg_thresh >= thresh)
++              bg_thresh = thresh / 2;
+       dtc->thresh = thresh;
+       dtc->bg_thresh = bg_thresh;
+ 
+@@ -470,7 +477,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
+       if (rt_task(tsk))
+               dirty += dirty / 4;
+ 
+-      return dirty;
++      /*
++       * Dirty throttling logic assumes the limits in page units fit into
++       * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
++       */
++      return min_t(unsigned long, dirty, UINT_MAX);
+ }
+ 
+ /**
+@@ -507,10 +518,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+               void *buffer, size_t *lenp, loff_t *ppos)
+ {
+       int ret;
++      unsigned long old_bytes = dirty_background_bytes;
+ 
+       ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+-      if (ret == 0 && write)
++      if (ret == 0 && write) {
++              if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
++                                                              UINT_MAX) {
++                      dirty_background_bytes = old_bytes;
++                      return -ERANGE;
++              }
+               dirty_background_ratio = 0;
++      }
+       return ret;
+ }
+ 
+@@ -536,6 +554,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
+ 
+       ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+       if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
++              if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
++                      vm_dirty_bytes = old_bytes;
++                      return -ERANGE;
++              }
+               writeback_set_ratelimit();
+               vm_dirty_ratio = 0;
+       }
+@@ -1526,7 +1548,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
+        */
+       dtc->wb_thresh = __wb_calc_thresh(dtc);
+       dtc->wb_bg_thresh = dtc->thresh ?
+-              div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
++              div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ 
+       /*
+        * In order to avoid the stacked BDI deadlock we need
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 8dabb9a74cb17..cdd65ca3124a4 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -434,15 +434,22 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 
+               end = start + skb_frag_size(frag);
+               if ((copy = end - offset) > 0) {
+-                      struct page *page = skb_frag_page(frag);
+-                      u8 *vaddr = kmap(page);
++                      u32 p_off, p_len, copied;
++                      struct page *p;
++                      u8 *vaddr;
+ 
+                       if (copy > len)
+                               copy = len;
+-                      n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+-                                      vaddr + skb_frag_off(frag) + offset - start,
+-                                      copy, data, to);
+-                      kunmap(page);
++
++                      skb_frag_foreach_page(frag,
++                                            skb_frag_off(frag) + offset - start,
++                                            copy, p, p_off, p_len, copied) {
++                              vaddr = kmap_local_page(p);
++                              n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++                                      vaddr + p_off, p_len, data, to);
++                              kunmap_local(vaddr);
++                      }
++
+                       offset += n;
+                       if (n != copy)
+                               goto short_copy;
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 8f690a6e61baa..e4e1999d93f50 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -1281,6 +1281,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
+       req.sdiag_family = AF_UNSPEC; /* compatibility */
+       req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
++      req.pad = 0;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+ 
+@@ -1296,6 +1297,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+       req.sdiag_family = rc->idiag_family;
+       req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
++      req.pad = 0;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2146299016eda..317cb90d77102 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3041,7 +3041,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
+                       return;
+ 
+               if (tcp_try_undo_dsack(sk))
+-                      tcp_try_keep_open(sk);
++                      tcp_try_to_open(sk, flag);
+ 
+               tcp_identify_packet_loss(sk, ack_flag);
+               if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index a7364ff8b558d..a4e03a7a2c030 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -619,6 +619,7 @@ static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] =
+       [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
+       [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
+                                           .len = sizeof(struct in6_addr), },
++      [TCP_METRICS_ATTR_SADDR_IPV4]   = { .type = NLA_U32, },
+       /* Following attributes are not received for GET/DEL,
+        * we keep them for reference
+        */
+diff --git a/net/mac802154/main.c b/net/mac802154/main.c
+index bd7bdb1219dd8..356e86c3c9b15 100644
+--- a/net/mac802154/main.c
++++ b/net/mac802154/main.c
+@@ -152,8 +152,10 @@ void ieee802154_configure_durations(struct wpan_phy *phy)
+       }
+ 
+       phy->symbol_duration = duration;
+-      phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
+-      phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_SEC;
++      phy->lifs_period =
++              (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
++      phy->sifs_period =
++              (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ EXPORT_SYMBOL(ieee802154_configure_durations);
+ 
+@@ -175,10 +177,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
+        * Should be done when all drivers sets this value.
+        */
+ 
+-      wpan_phy->lifs_period =
+-              (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
+-      wpan_phy->sifs_period =
+-              (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000;
++      wpan_phy->lifs_period = (IEEE802154_LIFS_PERIOD *
++                               wpan_phy->symbol_duration) / NSEC_PER_USEC;
++      wpan_phy->sifs_period = (IEEE802154_SIFS_PERIOD *
++                               wpan_phy->symbol_duration) / NSEC_PER_USEC;
+ }
+ 
+ int ieee802154_register_hw(struct ieee802154_hw *hw)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 97ea72d31bd35..d18b698139caf 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10858,8 +10858,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ 
+       gc_seq = nft_gc_seq_begin(nft_net);
+ 
+-      if (!list_empty(&nf_tables_destroy_list))
+-              nf_tables_trans_destroy_flush_work();
++      nf_tables_trans_destroy_flush_work();
+ again:
+       list_for_each_entry(table, &nft_net->tables, list) {
+               if (nft_table_has_owner(table) &&
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 237a6b04adf6f..9689d2f2d91f9 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7116,6 +7116,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+       struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_assoc_ids *ids;
++      size_t ids_size;
+       u32 num = 0;
+ 
+       if (sctp_style(sk, TCP))
+@@ -7128,11 +7129,11 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+               num++;
+       }
+ 
+-      if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
++      ids_size = struct_size(ids, gaids_assoc_id, num);
++      if (len < ids_size)
+               return -EINVAL;
+ 
+-      len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+-
++      len = ids_size;
+       ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
+       if (unlikely(!ids))
+               return -ENOMEM;
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 458b2948b580d..019560548ac98 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -182,7 +182,7 @@ kallsyms_step()
+       mksysmap ${kallsyms_vmlinux} ${kallsyms_vmlinux}.syms
+       kallsyms ${kallsyms_vmlinux}.syms ${kallsyms_S}
+ 
+-      info AS ${kallsyms_S}
++      info AS ${kallsymso}
+       ${CC} ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS} \
+             ${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+             -c -o ${kallsymso} ${kallsyms_S}
+diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
+index 704b0c895605a..963df28584eed 100644
+--- a/security/apparmor/audit.c
++++ b/security/apparmor/audit.c
+@@ -173,7 +173,7 @@ void aa_audit_rule_free(void *vrule)
+       }
+ }
+ 
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
+ {
+       struct aa_audit_rule *rule;
+ 
+@@ -186,14 +186,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+               return -EINVAL;
+       }
+ 
+-      rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
++      rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
+ 
+       if (!rule)
+               return -ENOMEM;
+ 
+       /* Currently rules are treated as coming from the root ns */
+       rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
+-                                   GFP_KERNEL, true, false);
++                                   gfp, true, false);
+       if (IS_ERR(rule->label)) {
+               int err = PTR_ERR(rule->label);
+               aa_audit_rule_free(rule);
+diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
+index 18519a4eb67e3..f325f1bef8d6d 100644
+--- a/security/apparmor/include/audit.h
++++ b/security/apparmor/include/audit.h
+@@ -186,7 +186,7 @@ static inline int complain_error(int error)
+ }
+ 
+ void aa_audit_rule_free(void *vrule);
+-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
++int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
+ int aa_audit_rule_known(struct audit_krule *rule);
+ int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
+ 
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index be965a8715e4e..c80bc15b4486e 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -428,7 +428,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
+ #else
+ 
+ static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+-                                     void **lsmrule)
++                                     void **lsmrule, gfp_t gfp)
+ {
+       return -EINVAL;
+ }
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index bdc40535ff489..1f930711db769 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -377,7 +377,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
+       kfree(entry);
+ }
+ 
+-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
++static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
++                                              gfp_t gfp)
+ {
+       struct ima_rule_entry *nentry;
+       int i;
+@@ -386,7 +387,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+        * Immutable elements are copied over as pointers and data; only
+        * lsm rules can change
+        */
+-      nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
++      nentry = kmemdup(entry, sizeof(*nentry), gfp);
+       if (!nentry)
+               return NULL;
+ 
+@@ -401,7 +402,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ 
+               ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+                                    nentry->lsm[i].args_p,
+-                                   &nentry->lsm[i].rule);
++                                   &nentry->lsm[i].rule,
++                                   gfp);
+               if (!nentry->lsm[i].rule)
+                       pr_warn("rule for LSM \'%s\' is undefined\n",
+                               nentry->lsm[i].args_p);
+@@ -414,7 +416,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+       int i;
+       struct ima_rule_entry *nentry;
+ 
+-      nentry = ima_lsm_copy_rule(entry);
++      nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
+       if (!nentry)
+               return -ENOMEM;
+ 
+@@ -638,7 +640,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+               }
+ 
+               if (rc == -ESTALE && !rule_reinitialized) {
+-                      lsm_rule = ima_lsm_copy_rule(rule);
++                      lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
+                       if (lsm_rule) {
+                               rule_reinitialized = true;
+                               goto retry;
+@@ -1113,7 +1115,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+       entry->lsm[lsm_rule].type = audit_type;
+       result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+                                     entry->lsm[lsm_rule].args_p,
+-                                    &entry->lsm[lsm_rule].rule);
++                                    &entry->lsm[lsm_rule].rule,
++                                    GFP_KERNEL);
+       if (!entry->lsm[lsm_rule].rule) {
+               pr_warn("rule for LSM \'%s\' is undefined\n",
+                       entry->lsm[lsm_rule].args_p);
+diff --git a/security/security.c b/security/security.c
+index 1b504c296551c..fb1692254851b 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -2617,9 +2617,11 @@ int security_key_getsecurity(struct key *key, char **_buffer)
+ 
+ #ifdef CONFIG_AUDIT
+ 
+-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
++int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
++                           gfp_t gfp)
+ {
+-      return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
++      return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule,
++                           gfp);
+ }
+ 
+ int security_audit_rule_known(struct audit_krule *krule)
+diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
+index 406bceb90c6cd..ef54f61a30669 100644
+--- a/security/selinux/include/audit.h
++++ b/security/selinux/include/audit.h
+@@ -21,12 +21,14 @@
+  *    @op: the operator the rule uses
+  *    @rulestr: the text "target" of the rule
+  *    @rule: pointer to the new rule structure returned via this
++ *    @gfp: GFP flag used for kmalloc
+  *
+  *    Returns 0 if successful, -errno if not.  On success, the rule structure
+  *    will be allocated internally.  The caller must free this structure with
+  *    selinux_audit_rule_free() after use.
+  */
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
++                          gfp_t gfp);
+ 
+ /**
+  *    selinux_audit_rule_free - free an selinux audit rule structure.
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 64a6a37dc36d9..2b8ebd390e375 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -3563,7 +3563,8 @@ void selinux_audit_rule_free(void *vrule)
+       }
+ }
+ 
+-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++                          gfp_t gfp)
+ {
+       struct selinux_state *state = &selinux_state;
+       struct selinux_policy *policy;
+@@ -3604,7 +3605,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+               return -EINVAL;
+       }
+ 
+-      tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
++      tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
+       if (!tmprule)
+               return -ENOMEM;
+ 
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index feba69549d086..b0a483e40c827 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4552,11 +4552,13 @@ static int smack_post_notification(const struct cred *w_cred,
+  * @op: required testing operator (=, !=, >, <, ...)
+  * @rulestr: smack label to be audited
+  * @vrule: pointer to save our own audit rule representation
++ * @gfp: type of the memory for the allocation
+  *
+  * Prepare to audit cases where (@field @op @rulestr) is true.
+  * The label to be audited is created if necessay.
+  */
+-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
++static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
++                               gfp_t gfp)
+ {
+       struct smack_known *skp;
+       char **rule = (char **)vrule;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 11ec5783a2f17..4635dc70a8404 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11584,6 +11584,7 @@ enum {
+       ALC897_FIXUP_LENOVO_HEADSET_MODE,
+       ALC897_FIXUP_HEADSET_MIC_PIN2,
+       ALC897_FIXUP_UNIS_H3C_X500S,
++      ALC897_FIXUP_HEADSET_MIC_PIN3,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -12030,10 +12031,18 @@ static const struct hda_fixup alc662_fixups[] = {
+                       {}
+               },
+       },
++      [ALC897_FIXUP_HEADSET_MIC_PIN3] = {
++              .type = HDA_FIXUP_PINS,
++              .v.pins = (const struct hda_pintbl[]) {
++                      { 0x19, 0x03a11050 }, /* use as headset mic */
++                      { }
++              },
++      },
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+       SND_PCI_QUIRK(0x1019, 0x9859, "JP-IK LEAP W502", ALC897_FIXUP_HEADSET_MIC_PIN3),
+       SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index 496e6a8ee0dc9..41740ae8aad73 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -102,6 +102,7 @@ enum bpf_enum_value_kind {
+       case 2: val = *(const unsigned short *)p; break;                      \
+       case 4: val = *(const unsigned int *)p; break;                        \
+       case 8: val = *(const unsigned long long *)p; break;                  \
++      default: val = 0; break;                                              \
+       }                                                                     \
+       val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
+       if (__CORE_RELO(s, field, SIGNED))                                    \
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index a41bad8e653bb..66e31da942588 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -566,6 +566,7 @@ struct topo_params {
+       int num_cpus;
+       int num_cores;
+       int max_cpu_num;
++      int max_die_id;
+       int max_node_num;
+       int nodes_per_pkg;
+       int cores_per_node;
+@@ -5864,7 +5865,6 @@ void topology_probe()
+       int i;
+       int max_core_id = 0;
+       int max_package_id = 0;
+-      int max_die_id = 0;
+       int max_siblings = 0;
+ 
+       /* Initialize num_cpus, max_cpu_num */
+@@ -5933,8 +5933,8 @@ void topology_probe()
+ 
+               /* get die information */
+               cpus[i].die_id = get_die_id(i);
+-              if (cpus[i].die_id > max_die_id)
+-                      max_die_id = cpus[i].die_id;
++              if (cpus[i].die_id > topo.max_die_id)
++                      topo.max_die_id = cpus[i].die_id;
+ 
+               /* get numa node information */
+               cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
+@@ -5960,9 +5960,9 @@ void topology_probe()
+       if (!summary_only && topo.cores_per_node > 1)
+               BIC_PRESENT(BIC_Core);
+ 
+-      topo.num_die = max_die_id + 1;
++      topo.num_die = topo.max_die_id + 1;
+       if (debug > 1)
+-              fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die);
++              fprintf(outf, "max_die_id %d, sizing for %d die\n", topo.max_die_id, topo.num_die);
+       if (!summary_only && topo.num_die > 1)
+               BIC_PRESENT(BIC_Die);
+ 
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index bdc03a2097e85..7ea5fb28c93db 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -85,6 +85,7 @@ static bool cfg_rx;
+ static int  cfg_runtime_ms    = 4200;
+ static int  cfg_verbose;
+ static int  cfg_waittime_ms   = 500;
++static int  cfg_notification_limit = 32;
+ static bool cfg_zerocopy;
+ 
+ static socklen_t cfg_alen;
+@@ -95,6 +96,7 @@ static char payload[IP_MAXPACKET];
+ static long packets, bytes, completions, expected_completions;
+ static int  zerocopied = -1;
+ static uint32_t next_completion;
++static uint32_t sends_since_notify;
+ 
+ static unsigned long gettimeofday_ms(void)
+ {
+@@ -208,6 +210,7 @@ static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy, int domain)
+               error(1, errno, "send");
+       if (cfg_verbose && ret != len)
+               fprintf(stderr, "send: ret=%u != %u\n", ret, len);
++      sends_since_notify++;
+ 
+       if (len) {
+               packets++;
+@@ -435,7 +438,7 @@ static bool do_recv_completion(int fd, int domain)
+       /* Detect notification gaps. These should not happen often, if at all.
+        * Gaps can occur due to drops, reordering and retransmissions.
+        */
+-      if (lo != next_completion)
++      if (cfg_verbose && lo != next_completion)
+               fprintf(stderr, "gap: %u..%u does not append to %u\n",
+                       lo, hi, next_completion);
+       next_completion = hi + 1;
+@@ -460,6 +463,7 @@ static bool do_recv_completion(int fd, int domain)
+ static void do_recv_completions(int fd, int domain)
+ {
+       while (do_recv_completion(fd, domain)) {}
++      sends_since_notify = 0;
+ }
+ 
+ /* Wait for all remaining completions on the errqueue */
+@@ -549,6 +553,9 @@ static void do_tx(int domain, int type, int protocol)
+               else
+                       do_sendmsg(fd, &msg, cfg_zerocopy, domain);
+ 
++              if (cfg_zerocopy && sends_since_notify >= cfg_notification_limit)
++                      do_recv_completions(fd, domain);
++
+               while (!do_poll(fd, POLLOUT)) {
+                       if (cfg_zerocopy)
+                               do_recv_completions(fd, domain);
+@@ -708,7 +715,7 @@ static void parse_opts(int argc, char **argv)
+ 
+       cfg_payload_len = max_payload_len;
+ 
+-      while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) {
++      while ((c = getopt(argc, argv, "46c:C:D:i:l:mp:rs:S:t:vz")) != -1) {
+               switch (c) {
+               case '4':
+                       if (cfg_family != PF_UNSPEC)
+@@ -736,6 +743,9 @@ static void parse_opts(int argc, char **argv)
+                       if (cfg_ifindex == 0)
+                               error(1, errno, "invalid iface: %s", optarg);
+                       break;
++              case 'l':
++                      cfg_notification_limit = strtoul(optarg, NULL, 0);
++                      break;
+               case 'm':
+                       cfg_cork_mixed = true;
+                       break;
