commit:     cfcdf3b0507246ef8ff1a4291c90feedb92166f9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jul  7 16:26:00 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jul  7 16:26:00 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cfcdf3b0

Linux patch 5.18.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1009_linux-5.18.10.patch | 3905 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3909 insertions(+)

diff --git a/0000_README b/0000_README
index e3668eda..557264e1 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-5.18.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.18.9
 
+Patch:  1009_linux-5.18.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.18.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.18.10.patch b/1009_linux-5.18.10.patch
new file mode 100644
index 00000000..064488e3
--- /dev/null
+++ b/1009_linux-5.18.10.patch
@@ -0,0 +1,3905 @@
+diff --git a/Makefile b/Makefile
+index 751cfd786c8c0..088b84f99203c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 18
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
+index 84a1cea1f43b9..309648c17f486 100644
+--- a/arch/arm/xen/p2m.c
++++ b/arch/arm/xen/p2m.c
+@@ -63,11 +63,12 @@ out:
+ 
+ unsigned long __pfn_to_mfn(unsigned long pfn)
+ {
+-      struct rb_node *n = phys_to_mach.rb_node;
++      struct rb_node *n;
+       struct xen_p2m_entry *entry;
+       unsigned long irqflags;
+ 
+       read_lock_irqsave(&p2m_lock, irqflags);
++      n = phys_to_mach.rb_node;
+       while (n) {
+               entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+               if (entry->pfn <= pfn &&
+@@ -152,10 +153,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
+       int rc;
+       unsigned long irqflags;
+       struct xen_p2m_entry *p2m_entry;
+-      struct rb_node *n = phys_to_mach.rb_node;
++      struct rb_node *n;
+ 
+       if (mfn == INVALID_P2M_ENTRY) {
+               write_lock_irqsave(&p2m_lock, irqflags);
++              n = phys_to_mach.rb_node;
+               while (n) {
+                       p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
+                       if (p2m_entry->pfn <= pfn &&
+diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
+index 2673d57eeb008..94652e13c2603 100644
+--- a/arch/parisc/kernel/asm-offsets.c
++++ b/arch/parisc/kernel/asm-offsets.c
+@@ -224,8 +224,13 @@ int main(void)
+       BLANK();
+       DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE);
+       DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
++#ifdef CONFIG_64BIT
+       DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32);
+       DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32);
++#else
++      DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE);
++      DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
++#endif
+       BLANK();
+       DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
+       DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index ed1e88a74dc42..bac581b5ecfc5 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -146,7 +146,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ "     depw    %%r0,31,2,%4\n"
+ "1:   ldw     0(%%sr1,%4),%0\n"
+ "2:   ldw     4(%%sr1,%4),%3\n"
+-"     subi    32,%4,%2\n"
++"     subi    32,%2,%2\n"
+ "     mtctl   %2,11\n"
+ "     vshd    %0,%3,%0\n"
+ "3:   \n"
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 500f2a0831ef8..45e471516c3c3 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -358,6 +358,10 @@ config ARCH_SUSPEND_NONZERO_CPU
+       def_bool y
+       depends on PPC_POWERNV || PPC_PSERIES
+ 
++config ARCH_HAS_ADD_PAGES
++      def_bool y
++      depends on ARCH_ENABLE_MEMORY_HOTPLUG
++
+ config PPC_DCR_NATIVE
+       bool
+ 
+diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h
+new file mode 100644
+index 0000000000000..e8a7b4ffb58c2
+--- /dev/null
++++ b/arch/powerpc/include/asm/bpf_perf_event.h
+@@ -0,0 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
++#define _ASM_POWERPC_BPF_PERF_EVENT_H
++
++#include <asm/ptrace.h>
++
++typedef struct user_pt_regs bpf_user_pt_regs_t;
++
++#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */
+diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
+deleted file mode 100644
+index 5e1e648aeec4c..0000000000000
+--- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h
++++ /dev/null
+@@ -1,9 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+-#define _UAPI__ASM_BPF_PERF_EVENT_H__
+-
+-#include <asm/ptrace.h>
+-
+-typedef struct user_pt_regs bpf_user_pt_regs_t;
+-
+-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
+diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
+index b183ab9c5107c..dfa5f729f774d 100644
+--- a/arch/powerpc/kernel/prom_init_check.sh
++++ b/arch/powerpc/kernel/prom_init_check.sh
+@@ -13,7 +13,7 @@
+ # If you really need to reference something from prom_init.o add
+ # it to the list below:
+ 
+-grep "^CONFIG_KASAN=y$" .config >/dev/null
++grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
+ if [ $? -eq 0 ]
+ then
+       MEM_FUNCS="__memcpy __memset"
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 149635e5c1653..5dd434d80f445 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -103,6 +103,37 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
+       vm_unmap_aliases();
+ }
+ 
++/*
++ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
++ * updating.
++ */
++static void update_end_of_memory_vars(u64 start, u64 size)
++{
++      unsigned long end_pfn = PFN_UP(start + size);
++
++      if (end_pfn > max_pfn) {
++              max_pfn = end_pfn;
++              max_low_pfn = end_pfn;
++              high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
++      }
++}
++
++int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
++                  struct mhp_params *params)
++{
++      int ret;
++
++      ret = __add_pages(nid, start_pfn, nr_pages, params);
++      if (ret)
++              return ret;
++
++      /* update max_pfn, max_low_pfn and high_memory */
++      update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
++                                nr_pages << PAGE_SHIFT);
++
++      return ret;
++}
++
+ int __ref arch_add_memory(int nid, u64 start, u64 size,
+                         struct mhp_params *params)
+ {
+@@ -113,7 +144,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
+       rc = arch_create_linear_mapping(nid, start, size, params);
+       if (rc)
+               return rc;
+-      rc = __add_pages(nid, start_pfn, nr_pages, params);
++      rc = add_pages(nid, start_pfn, nr_pages, params);
+       if (rc)
+               arch_remove_linear_mapping(start, size);
+       return rc;
+diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
+index 7d4368d055a68..b80fc4a91a534 100644
+--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
++++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
+@@ -96,8 +96,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+               pgdp = pgd_offset_k(ea);
+               p4dp = p4d_offset(pgdp, ea);
+               if (p4d_none(*p4dp)) {
+-                      pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
+-                      p4d_populate(&init_mm, p4dp, pmdp);
++                      pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
++                      p4d_populate(&init_mm, p4dp, pudp);
+               }
+               pudp = pud_offset(p4dp, ea);
+               if (pud_none(*pudp)) {
+@@ -106,7 +106,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+               }
+               pmdp = pmd_offset(pudp, ea);
+               if (!pmd_present(*pmdp)) {
+-                      ptep = early_alloc_pgtable(PAGE_SIZE);
++                      ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
+                       pmd_populate_kernel(&init_mm, pmdp, ptep);
+               }
+               ptep = pte_offset_kernel(pmdp, ea);
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 359b0cc0dc35d..d7dcad9011468 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -487,7 +487,6 @@ config KEXEC
+ config KEXEC_FILE
+       bool "kexec file based system call"
+       select KEXEC_CORE
+-      select BUILD_BIN2C
+       depends on CRYPTO
+       depends on CRYPTO_SHA256
+       depends on CRYPTO_SHA256_S390
+diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
+index 56007c763902a..1f2d40993c4d2 100644
+--- a/arch/s390/crypto/arch_random.c
++++ b/arch/s390/crypto/arch_random.c
+@@ -4,232 +4,15 @@
+  *
+  * Copyright IBM Corp. 2017, 2020
+  * Author(s): Harald Freudenberger
+- *
+- * The s390_arch_random_generate() function may be called from random.c
+- * in interrupt context. So this implementation does the best to be very
+- * fast. There is a buffer of random data which is asynchronously checked
+- * and filled by a workqueue thread.
+- * If there are enough bytes in the buffer the s390_arch_random_generate()
+- * just delivers these bytes. Otherwise false is returned until the
+- * worker thread refills the buffer.
+- * The worker fills the rng buffer by pulling fresh entropy from the
+- * high quality (but slow) true hardware random generator. This entropy
+- * is then spread over the buffer with an pseudo random generator PRNG.
+- * As the arch_get_random_seed_long() fetches 8 bytes and the calling
+- * function add_interrupt_randomness() counts this as 1 bit entropy the
+- * distribution needs to make sure there is in fact 1 bit entropy contained
+- * in 8 bytes of the buffer. The current values pull 32 byte entropy
+- * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
+- * will contain 1 bit of entropy.
+- * The worker thread is rescheduled based on the charge level of the
+- * buffer but at least with 500 ms delay to avoid too much CPU consumption.
+- * So the max. amount of rng data delivered via arch_get_random_seed is
+- * limited to 4k bytes per second.
+  */
+ 
+ #include <linux/kernel.h>
+ #include <linux/atomic.h>
+ #include <linux/random.h>
+-#include <linux/slab.h>
+ #include <linux/static_key.h>
+-#include <linux/workqueue.h>
+-#include <linux/moduleparam.h>
+ #include <asm/cpacf.h>
+ 
+ DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
+ 
+ atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
+ EXPORT_SYMBOL(s390_arch_random_counter);
+-
+-#define ARCH_REFILL_TICKS (HZ/2)
+-#define ARCH_PRNG_SEED_SIZE 32
+-#define ARCH_RNG_BUF_SIZE 2048
+-
+-static DEFINE_SPINLOCK(arch_rng_lock);
+-static u8 *arch_rng_buf;
+-static unsigned int arch_rng_buf_idx;
+-
+-static void arch_rng_refill_buffer(struct work_struct *);
+-static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
+-
+-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
+-{
+-      /* max hunk is ARCH_RNG_BUF_SIZE */
+-      if (nbytes > ARCH_RNG_BUF_SIZE)
+-              return false;
+-
+-      /* lock rng buffer */
+-      if (!spin_trylock(&arch_rng_lock))
+-              return false;
+-
+-      /* try to resolve the requested amount of bytes from the buffer */
+-      arch_rng_buf_idx -= nbytes;
+-      if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
+-              memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
+-              atomic64_add(nbytes, &s390_arch_random_counter);
+-              spin_unlock(&arch_rng_lock);
+-              return true;
+-      }
+-
+-      /* not enough bytes in rng buffer, refill is done asynchronously */
+-      spin_unlock(&arch_rng_lock);
+-
+-      return false;
+-}
+-EXPORT_SYMBOL(s390_arch_random_generate);
+-
+-static void arch_rng_refill_buffer(struct work_struct *unused)
+-{
+-      unsigned int delay = ARCH_REFILL_TICKS;
+-
+-      spin_lock(&arch_rng_lock);
+-      if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
+-              /* buffer is exhausted and needs refill */
+-              u8 seed[ARCH_PRNG_SEED_SIZE];
+-              u8 prng_wa[240];
+-              /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
+-              cpacf_trng(NULL, 0, seed, sizeof(seed));
+-              /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
+-              memset(prng_wa, 0, sizeof(prng_wa));
+-              cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
+-                         &prng_wa, NULL, 0, seed, sizeof(seed));
+-              cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
+-                         &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
+-              arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
+-      }
+-      delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
+-      spin_unlock(&arch_rng_lock);
+-
+-      /* kick next check */
+-      queue_delayed_work(system_long_wq, &arch_rng_work, delay);
+-}
+-
+-/*
+- * Here follows the implementation of s390_arch_get_random_long().
+- *
+- * The random longs to be pulled by arch_get_random_long() are
+- * prepared in an 4K buffer which is filled from the NIST 800-90
+- * compliant s390 drbg. By default the random long buffer is refilled
+- * 256 times before the drbg itself needs a reseed. The reseed of the
+- * drbg is done with 32 bytes fetched from the high quality (but slow)
+- * trng which is assumed to deliver 100% entropy. So the 32 * 8 = 256
+- * bits of entropy are spread over 256 * 4KB = 1MB serving 131072
+- * arch_get_random_long() invocations before reseeded.
+- *
+- * How often the 4K random long buffer is refilled with the drbg
+- * before the drbg is reseeded can be adjusted. There is a module
+- * parameter 's390_arch_rnd_long_drbg_reseed' accessible via
+- *   /sys/module/arch_random/parameters/rndlong_drbg_reseed
+- * or as kernel command line parameter
+- *   arch_random.rndlong_drbg_reseed=<value>
+- * This parameter tells how often the drbg fills the 4K buffer before
+- * it is re-seeded by fresh entropy from the trng.
+- * A value of 16 results in reseeding the drbg at every 16 * 4 KB = 64
+- * KB with 32 bytes of fresh entropy pulled from the trng. So a value
+- * of 16 would result in 256 bits entropy per 64 KB.
+- * A value of 256 results in 1MB of drbg output before a reseed of the
+- * drbg is done. So this would spread the 256 bits of entropy among 1MB.
+- * Setting this parameter to 0 forces the reseed to take place every
+- * time the 4K buffer is depleted, so the entropy rises to 256 bits
+- * entropy per 4K or 0.5 bit entropy per arch_get_random_long().  With
+- * setting this parameter to negative values all this effort is
+- * disabled, arch_get_random long() returns false and thus indicating
+- * that the arch_get_random_long() feature is disabled at all.
+- */
+-
+-static unsigned long rndlong_buf[512];
+-static DEFINE_SPINLOCK(rndlong_lock);
+-static int rndlong_buf_index;
+-
+-static int rndlong_drbg_reseed = 256;
+-module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600);
+-MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed");
+-
+-static inline void refill_rndlong_buf(void)
+-{
+-      static u8 prng_ws[240];
+-      static int drbg_counter;
+-
+-      if (--drbg_counter < 0) {
+-              /* need to re-seed the drbg */
+-              u8 seed[32];
+-
+-              /* fetch seed from trng */
+-              cpacf_trng(NULL, 0, seed, sizeof(seed));
+-              /* seed drbg */
+-              memset(prng_ws, 0, sizeof(prng_ws));
+-              cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
+-                         &prng_ws, NULL, 0, seed, sizeof(seed));
+-              /* re-init counter for drbg */
+-              drbg_counter = rndlong_drbg_reseed;
+-      }
+-
+-      /* fill the arch_get_random_long buffer from drbg */
+-      cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws,
+-                 (u8 *) rndlong_buf, sizeof(rndlong_buf),
+-                 NULL, 0);
+-}
+-
+-bool s390_arch_get_random_long(unsigned long *v)
+-{
+-      bool rc = false;
+-      unsigned long flags;
+-
+-      /* arch_get_random_long() disabled ? */
+-      if (rndlong_drbg_reseed < 0)
+-              return false;
+-
+-      /* try to lock the random long lock */
+-      if (!spin_trylock_irqsave(&rndlong_lock, flags))
+-              return false;
+-
+-      if (--rndlong_buf_index >= 0) {
+-              /* deliver next long value from the buffer */
+-              *v = rndlong_buf[rndlong_buf_index];
+-              rc = true;
+-              goto out;
+-      }
+-
+-      /* buffer is depleted and needs refill */
+-      if (in_interrupt()) {
+-              /* delay refill in interrupt context to next caller */
+-              rndlong_buf_index = 0;
+-              goto out;
+-      }
+-
+-      /* refill random long buffer */
+-      refill_rndlong_buf();
+-      rndlong_buf_index = ARRAY_SIZE(rndlong_buf);
+-
+-      /* and provide one random long */
+-      *v = rndlong_buf[--rndlong_buf_index];
+-      rc = true;
+-
+-out:
+-      spin_unlock_irqrestore(&rndlong_lock, flags);
+-      return rc;
+-}
+-EXPORT_SYMBOL(s390_arch_get_random_long);
+-
+-static int __init s390_arch_random_init(void)
+-{
+-      /* all the needed PRNO subfunctions available ? */
+-      if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
+-          cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
+-
+-              /* alloc arch random working buffer */
+-              arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
+-              if (!arch_rng_buf)
+-                      return -ENOMEM;
+-
+-              /* kick worker queue job to fill the random buffer */
+-              queue_delayed_work(system_long_wq,
+-                                 &arch_rng_work, ARCH_REFILL_TICKS);
+-
+-              /* enable arch random to the outside world */
+-              static_branch_enable(&s390_arch_random_available);
+-      }
+-
+-      return 0;
+-}
+-arch_initcall(s390_arch_random_init);
+diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
+index 5dc712fde3c7f..2c6e1c6ecbe78 100644
+--- a/arch/s390/include/asm/archrandom.h
++++ b/arch/s390/include/asm/archrandom.h
+@@ -15,17 +15,13 @@
+ 
+ #include <linux/static_key.h>
+ #include <linux/atomic.h>
++#include <asm/cpacf.h>
+ 
+ DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
+ extern atomic64_t s390_arch_random_counter;
+ 
+-bool s390_arch_get_random_long(unsigned long *v);
+-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
+-
+ static inline bool __must_check arch_get_random_long(unsigned long *v)
+ {
+-      if (static_branch_likely(&s390_arch_random_available))
+-              return s390_arch_get_random_long(v);
+       return false;
+ }
+ 
+@@ -37,7 +33,9 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
+ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+ {
+       if (static_branch_likely(&s390_arch_random_available)) {
+-              return s390_arch_random_generate((u8 *)v, sizeof(*v));
++              cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
++              atomic64_add(sizeof(*v), &s390_arch_random_counter);
++              return true;
+       }
+       return false;
+ }
+@@ -45,7 +43,9 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+ {
+       if (static_branch_likely(&s390_arch_random_available)) {
+-              return s390_arch_random_generate((u8 *)v, sizeof(*v));
++              cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
++              atomic64_add(sizeof(*v), &s390_arch_random_counter);
++              return true;
+       }
+       return false;
+ }
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index d860ac3009197..2cef49983e9e7 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -875,6 +875,11 @@ static void __init setup_randomness(void)
+       if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+               add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
+       memblock_free(vmms, PAGE_SIZE);
++
++#ifdef CONFIG_ARCH_RANDOM
++      if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
++              static_branch_enable(&s390_arch_random_available);
++#endif
+ }
+ 
+ /*
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 990ff5b0aeb87..e4ea42b83b512 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -73,6 +73,7 @@ module_param(device_id_scheme, bool, 0444);
+ static int only_lcd = -1;
+ module_param(only_lcd, int, 0444);
+ 
++static bool has_backlight;
+ static int register_count;
+ static DEFINE_MUTEX(register_count_mutex);
+ static DEFINE_MUTEX(video_list_lock);
+@@ -1222,6 +1223,9 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
+       acpi_video_device_bind(video, data);
+       acpi_video_device_find_cap(data);
+ 
++      if (data->cap._BCM && data->cap._BCL)
++              has_backlight = true;
++
+       mutex_lock(&video->device_list_lock);
+       list_add_tail(&data->entry, &video->video_device_list);
+       mutex_unlock(&video->device_list_lock);
+@@ -2250,6 +2254,7 @@ void acpi_video_unregister(void)
+       if (register_count) {
+               acpi_bus_unregister_driver(&acpi_video_bus);
+               register_count = 0;
++              has_backlight = false;
+       }
+       mutex_unlock(&register_count_mutex);
+ }
+@@ -2271,13 +2276,7 @@ void acpi_video_unregister_backlight(void)
+ 
+ bool acpi_video_handles_brightness_key_presses(void)
+ {
+-      bool have_video_busses;
+-
+-      mutex_lock(&video_list_lock);
+-      have_video_busses = !list_empty(&video_bus_head);
+-      mutex_unlock(&video_list_lock);
+-
+-      return have_video_busses &&
++      return has_backlight &&
+              (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);
+ }
+ EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 966a6bf4c1627..cf9cfc40a0283 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -152,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order;
+ module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
+ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
+ 
++static bool __read_mostly xen_blkif_trusted = true;
++module_param_named(trusted, xen_blkif_trusted, bool, 0644);
++MODULE_PARM_DESC(trusted, "Is the backend trusted");
++
+ #define BLK_RING_SIZE(info)   \
+       __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
+ 
+@@ -210,6 +214,7 @@ struct blkfront_info
+       unsigned int feature_discard:1;
+       unsigned int feature_secdiscard:1;
+       unsigned int feature_persistent:1;
++      unsigned int bounce:1;
+       unsigned int discard_granularity;
+       unsigned int discard_alignment;
+       /* Number of 4KB segments handled */
+@@ -312,8 +317,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
+               if (!gnt_list_entry)
+                       goto out_of_memory;
+ 
+-              if (info->feature_persistent) {
+-                      granted_page = alloc_page(GFP_NOIO);
++              if (info->bounce) {
++                      granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
+                       if (!granted_page) {
+                               kfree(gnt_list_entry);
+                               goto out_of_memory;
+@@ -332,7 +337,7 @@ out_of_memory:
+       list_for_each_entry_safe(gnt_list_entry, n,
+                                &rinfo->grants, node) {
+               list_del(&gnt_list_entry->node);
+-              if (info->feature_persistent)
++              if (info->bounce)
+                       __free_page(gnt_list_entry->page);
+               kfree(gnt_list_entry);
+               i--;
+@@ -378,7 +383,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
+       /* Assign a gref to this page */
+       gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+       BUG_ON(gnt_list_entry->gref == -ENOSPC);
+-      if (info->feature_persistent)
++      if (info->bounce)
+               grant_foreign_access(gnt_list_entry, info);
+       else {
+               /* Grant access to the GFN passed by the caller */
+@@ -402,7 +407,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
+       /* Assign a gref to this page */
+       gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+       BUG_ON(gnt_list_entry->gref == -ENOSPC);
+-      if (!info->feature_persistent) {
++      if (!info->bounce) {
+               struct page *indirect_page;
+ 
+               /* Fetch a pre-allocated page to use for indirect grefs */
+@@ -705,7 +710,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
+               .grant_idx = 0,
+               .segments = NULL,
+               .rinfo = rinfo,
+-              .need_copy = rq_data_dir(req) && info->feature_persistent,
++              .need_copy = rq_data_dir(req) && info->bounce,
+       };
+ 
+       /*
+@@ -983,11 +988,12 @@ static void xlvbd_flush(struct blkfront_info *info)
+ {
+       blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+                             info->feature_fua ? true : false);
+-      pr_info("blkfront: %s: %s %s %s %s %s\n",
++      pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
+               info->gd->disk_name, flush_info(info),
+               "persistent grants:", info->feature_persistent ?
+               "enabled;" : "disabled;", "indirect descriptors:",
+-              info->max_indirect_segments ? "enabled;" : "disabled;");
++              info->max_indirect_segments ? "enabled;" : "disabled;",
++              "bounce buffer:", info->bounce ? "enabled" : "disabled;");
+ }
+ 
+ static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
+@@ -1209,7 +1215,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
+       if (!list_empty(&rinfo->indirect_pages)) {
+               struct page *indirect_page, *n;
+ 
+-              BUG_ON(info->feature_persistent);
++              BUG_ON(info->bounce);
+               list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
+                       list_del(&indirect_page->lru);
+                       __free_page(indirect_page);
+@@ -1226,7 +1232,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
+                                                         0UL);
+                               rinfo->persistent_gnts_c--;
+                       }
+-                      if (info->feature_persistent)
++                      if (info->bounce)
+                               __free_page(persistent_gnt->page);
+                       kfree(persistent_gnt);
+               }
+@@ -1247,7 +1253,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
+               for (j = 0; j < segs; j++) {
+                       persistent_gnt = rinfo->shadow[i].grants_used[j];
+                       gnttab_end_foreign_access(persistent_gnt->gref, 0UL);
+-                      if (info->feature_persistent)
++                      if (info->bounce)
+                               __free_page(persistent_gnt->page);
+                       kfree(persistent_gnt);
+               }
+@@ -1437,7 +1443,7 @@ static int blkif_completion(unsigned long *id,
+       data.s = s;
+       num_sg = s->num_sg;
+ 
+-      if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
++      if (bret->operation == BLKIF_OP_READ && info->bounce) {
+               for_each_sg(s->sg, sg, num_sg, i) {
+                       BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+ 
+@@ -1496,7 +1502,7 @@ static int blkif_completion(unsigned long *id,
+                                * Add the used indirect page back to the list of
+                                * available pages for indirect grefs.
+                                */
+-                              if (!info->feature_persistent) {
++                              if (!info->bounce) {
+                                       indirect_page = s->indirect_grants[i]->page;
+                                       list_add(&indirect_page->lru, &rinfo->indirect_pages);
+                               }
+@@ -1689,7 +1695,7 @@ static int setup_blkring(struct xenbus_device *dev,
+       for (i = 0; i < info->nr_ring_pages; i++)
+               rinfo->ring_ref[i] = GRANT_INVALID_REF;
+ 
+-      sring = alloc_pages_exact(ring_size, GFP_NOIO);
++      sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
+       if (!sring) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+               return -ENOMEM;
+@@ -1787,6 +1793,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
+       if (!info)
+               return -ENODEV;
+ 
++      /* Check if backend is trusted. */
++      info->bounce = !xen_blkif_trusted ||
++                     !xenbus_read_unsigned(dev->nodename, "trusted", 1);
++
+       max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
+                                             "max-ring-page-order", 0);
+       ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
+@@ -2196,17 +2206,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
+       if (err)
+               goto out_of_memory;
+ 
+-      if (!info->feature_persistent && info->max_indirect_segments) {
++      if (!info->bounce && info->max_indirect_segments) {
+               /*
+-               * We are using indirect descriptors but not persistent
+-               * grants, we need to allocate a set of pages that can be
++               * We are using indirect descriptors but don't have a bounce
++               * buffer, we need to allocate a set of pages that can be
+                * used for mapping indirect grefs
+                */
+               int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
+ 
+               BUG_ON(!list_empty(&rinfo->indirect_pages));
+               for (i = 0; i < num; i++) {
+-                      struct page *indirect_page = alloc_page(GFP_KERNEL);
++                      struct page *indirect_page = alloc_page(GFP_KERNEL |
++                                                              __GFP_ZERO);
+                       if (!indirect_page)
+                               goto out_of_memory;
+                       list_add(&indirect_page->lru, &rinfo->indirect_pages);
+@@ -2299,6 +2310,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
+               info->feature_persistent =
+                       !!xenbus_read_unsigned(info->xbdev->otherend,
+                                              "feature-persistent", 0);
++      if (info->feature_persistent)
++              info->bounce = true;
+ 
+       indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
+                                       "feature-max-indirect-segments", 0);
+@@ -2570,6 +2583,13 @@ static void blkfront_delay_work(struct work_struct *work)
+       struct blkfront_info *info;
+       bool need_schedule_work = false;
+ 
++      /*
++       * Note that when using bounce buffers but not persistent grants
++       * there's no need to run blkfront_delay_work because grants are
++       * revoked in blkif_completion or else an error is reported and the
++       * connection is closed.
++       */
++
+       mutex_lock(&blkfront_mutex);
+ 
+       list_for_each_entry(info, &info_list, info_list) {
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index 7be38bc6a673b..9ac75c1cde9c2 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -566,6 +566,28 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+       return 0;
+ }
+ 
++static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
++{
++      int ret;
++
++      ret = amd_pstate_enable(true);
++      if (ret)
++              pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
++
++      return ret;
++}
++
++static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
++{
++      int ret;
++
++      ret = amd_pstate_enable(false);
++      if (ret)
++              pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
++
++      return ret;
++}
++
+ /* Sysfs attributes */
+ 
+ /*
+@@ -636,6 +658,8 @@ static struct cpufreq_driver amd_pstate_driver = {
+       .target         = amd_pstate_target,
+       .init           = amd_pstate_cpu_init,
+       .exit           = amd_pstate_cpu_exit,
++      .suspend        = amd_pstate_cpu_suspend,
++      .resume         = amd_pstate_cpu_resume,
+       .set_boost      = amd_pstate_set_boost,
+       .name           = "amd-pstate",
+       .attr           = amd_pstate_attr,
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 0253731d6d25d..36c79580fba25 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -442,6 +442,9 @@ static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
+       struct platform_device *pdev = cpufreq_get_driver_data();
+       int ret;
+ 
++      if (data->throttle_irq <= 0)
++              return 0;
++
+       ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+       if (ret)
+               dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+@@ -469,6 +472,9 @@ static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
+ 
+ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+ {
++      if (data->throttle_irq <= 0)
++              return;
++
+       free_irq(data->throttle_irq, data);
+ }
+ 
+diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
+index 6b6b20da2bcfc..573b417e14833 100644
+--- a/drivers/cpufreq/qoriq-cpufreq.c
++++ b/drivers/cpufreq/qoriq-cpufreq.c
+@@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
+ 
+       np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
+       if (np) {
++              of_node_put(np);
+               dev_info(&pdev->dev, "Disabling due to erratum A-008083");
+               return -ENODEV;
+       }
+diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
+index 9b849d7811167..a443e7c42dafa 100644
+--- a/drivers/devfreq/event/exynos-ppmu.c
++++ b/drivers/devfreq/event/exynos-ppmu.c
+@@ -519,15 +519,19 @@ static int of_get_devfreq_events(struct device_node *np,
+ 
+       count = of_get_child_count(events_np);
+       desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
+-      if (!desc)
++      if (!desc) {
++              of_node_put(events_np);
+               return -ENOMEM;
++      }
+       info->num_events = count;
+ 
+       of_id = of_match_device(exynos_ppmu_id_match, dev);
+       if (of_id)
+               info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
+-      else
++      else {
++              of_node_put(events_np);
+               return -EINVAL;
++      }
+ 
+       j = 0;
+       for_each_child_of_node(events_np, node) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+index 6ca1db3c243f9..3610bcd29ad92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -703,7 +703,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+ {
+       bool all_hub = false;
+ 
+-      if (adev->family == AMDGPU_FAMILY_AI)
++      if (adev->family == AMDGPU_FAMILY_AI ||
++          adev->family == AMDGPU_FAMILY_RV)
+               all_hub = true;
+ 
+       return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 49f734137f158..66e40cac5eb36 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5140,7 +5140,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
+                */
+               amdgpu_unregister_gpu_instance(tmp_adev);
+ 
+-              drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
++              drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
+ 
+               /* disable ras on ALL IPs */
+               if (!need_emergency_restart &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index ea3e8c66211fd..ebd53dacfac4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -333,6 +333,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
+       if (!amdgpu_device_has_dc_support(adev)) {
+               if (!adev->enable_virtual_display)
+                       /* Disable vblank IRQs aggressively for power-saving */
++                      /* XXX: can this be enabled for DC? */
+                       adev_to_drm(adev)->vblank_disable_immediate = true;
+ 
+               r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 78a38c3b7d664..6dc9808760fc8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4286,9 +4286,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+       }
+ #endif
+ 
+-      /* Disable vblank IRQs aggressively for power-saving. */
+-      adev_to_drm(adev)->vblank_disable_immediate = true;
+-
+       /* loops over all connectors on the board */
+       for (i = 0; i < link_cnt; i++) {
+               struct dc_link *link = NULL;
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 9ae294eb7fb4b..12b7d4d392167 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -932,8 +932,9 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
+       case I915_CONTEXT_PARAM_PERSISTENCE:
+               if (args->size)
+                       ret = -EINVAL;
+-              ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+-                                                  args->value);
++              else
++                      ret = proto_context_set_persistence(fpriv->dev_priv, pc,
++                                                          args->value);
+               break;
+ 
+       case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
+index 62b3f332bbf5c..0478fa6259ebf 100644
+--- a/drivers/gpu/drm/i915/i915_driver.c
++++ b/drivers/gpu/drm/i915/i915_driver.c
+@@ -538,6 +538,7 @@ mask_err:
+ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+ {
+       struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
++      struct pci_dev *root_pdev;
+       int ret;
+ 
+       if (i915_inject_probe_failure(dev_priv))
+@@ -651,6 +652,15 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+ 
+       intel_bw_init_hw(dev_priv);
+ 
++      /*
++       * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
++       * This should be totally removed when we handle the pci states properly
++       * on runtime PM and on s2idle cases.
++       */
++      root_pdev = pcie_find_root_port(pdev);
++      if (root_pdev)
++              pci_d3cold_disable(root_pdev);
++
+       return 0;
+ 
+ err_msi:
+@@ -674,11 +684,16 @@ err_perf:
+ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
+ {
+       struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
++      struct pci_dev *root_pdev;
+ 
+       i915_perf_fini(dev_priv);
+ 
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
++
++      root_pdev = pcie_find_root_port(pdev);
++      if (root_pdev)
++              pci_d3cold_enable(root_pdev);
+ }
+ 
+ /**
+@@ -1195,14 +1210,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+               goto out;
+       }
+ 
+-      /*
+-       * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+-       * This should be totally removed when we handle the pci states properly
+-       * on runtime PM and on s2idle cases.
+-       */
+-      if (suspend_to_idle(dev_priv))
+-              pci_d3cold_disable(pdev);
+-
+       pci_disable_device(pdev);
+       /*
+        * During hibernation on some platforms the BIOS may try to access
+@@ -1367,8 +1374,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ 
+       pci_set_master(pdev);
+ 
+-      pci_d3cold_enable(pdev);
+-
+       disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ 
+       ret = vlv_resume_prepare(dev_priv, false);
+@@ -1545,7 +1550,6 @@ static int intel_runtime_suspend(struct device *kdev)
+ {
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+-      struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+       int ret;
+ 
+       if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
+@@ -1591,12 +1595,6 @@ static int intel_runtime_suspend(struct device *kdev)
+               drm_err(&dev_priv->drm,
+                       "Unclaimed access detected prior to suspending\n");
+ 
+-      /*
+-       * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+-       * This should be totally removed when we handle the pci states properly
+-       * on runtime PM and on s2idle cases.
+-       */
+-      pci_d3cold_disable(pdev);
+       rpm->suspended = true;
+ 
+       /*
+@@ -1635,7 +1633,6 @@ static int intel_runtime_resume(struct device *kdev)
+ {
+       struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+-      struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+       int ret;
+ 
+       if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
+@@ -1648,7 +1645,6 @@ static int intel_runtime_resume(struct device *kdev)
+ 
+       intel_opregion_notify_adapter(dev_priv, PCI_D0);
+       rpm->suspended = false;
+-      pci_d3cold_enable(pdev);
+       if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
+               drm_dbg(&dev_priv->drm,
+                       "Unclaimed access during suspend, bios?\n");
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 3940b9c6323be..fffd2ef897a00 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -1187,12 +1187,13 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+       DPU_ATRACE_BEGIN("encoder_vblank_callback");
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+ 
++      atomic_inc(&phy_enc->vsync_cnt);
++
+       spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+       if (dpu_enc->crtc)
+               dpu_crtc_vblank_callback(dpu_enc->crtc);
+       spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ 
+-      atomic_inc(&phy_enc->vsync_cnt);
+       DPU_ATRACE_END("encoder_vblank_callback");
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index c6d60c8d286de..fec4e39732879 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -913,7 +913,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+                                                   INT_MAX, GFP_KERNEL);
+       }
+       if (submit->fence_id < 0) {
+-              ret = submit->fence_id = 0;
++              ret = submit->fence_id;
+               submit->fence_id = 0;
+       }
+ 
+diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
+index de6baf6ca3d1e..dab4908b78a88 100644
+--- a/drivers/hwmon/ibmaem.c
++++ b/drivers/hwmon/ibmaem.c
+@@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
+ 
+       res = platform_device_add(data->pdev);
+       if (res)
+-              goto ipmi_err;
++              goto dev_add_err;
+ 
+       platform_set_drvdata(data->pdev, data);
+ 
+@@ -598,7 +598,9 @@ hwmon_reg_err:
+       ipmi_destroy_user(data->ipmi.user);
+ ipmi_err:
+       platform_set_drvdata(data->pdev, NULL);
+-      platform_device_unregister(data->pdev);
++      platform_device_del(data->pdev);
++dev_add_err:
++      platform_device_put(data->pdev);
+ dev_err:
+       ida_simple_remove(&aem_ida, data->id);
+ id_err:
+@@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
+ 
+       res = platform_device_add(data->pdev);
+       if (res)
+-              goto ipmi_err;
++              goto dev_add_err;
+ 
+       platform_set_drvdata(data->pdev, data);
+ 
+@@ -738,7 +740,9 @@ hwmon_reg_err:
+       ipmi_destroy_user(data->ipmi.user);
+ ipmi_err:
+       platform_set_drvdata(data->pdev, NULL);
+-      platform_device_unregister(data->pdev);
++      platform_device_del(data->pdev);
++dev_add_err:
++      platform_device_put(data->pdev);
+ dev_err:
+       ida_simple_remove(&aem_ida, data->id);
+ id_err:
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index f00cd59f1d19f..1757f3ab842e1 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -145,7 +145,7 @@ static int occ_poll(struct occ *occ)
+       cmd[6] = 0;                     /* checksum lsb */
+ 
+       /* mutex should already be locked if necessary */
+-      rc = occ->send_cmd(occ, cmd, sizeof(cmd));
++      rc = occ->send_cmd(occ, cmd, sizeof(cmd), &occ->resp, sizeof(occ->resp));
+       if (rc) {
+               occ->last_error = rc;
+               if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
+@@ -182,6 +182,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
+ {
+       int rc;
+       u8 cmd[8];
++      u8 resp[8];
+       __be16 user_power_cap_be = cpu_to_be16(user_power_cap);
+ 
+       cmd[0] = 0;     /* sequence number */
+@@ -198,7 +199,7 @@ static int occ_set_user_power_cap(struct occ *occ, u16 user_power_cap)
+       if (rc)
+               return rc;
+ 
+-      rc = occ->send_cmd(occ, cmd, sizeof(cmd));
++      rc = occ->send_cmd(occ, cmd, sizeof(cmd), resp, sizeof(resp));
+ 
+       mutex_unlock(&occ->lock);
+ 
+diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
+index 2dd4a4d240c0f..726943af9a077 100644
+--- a/drivers/hwmon/occ/common.h
++++ b/drivers/hwmon/occ/common.h
+@@ -96,7 +96,8 @@ struct occ {
+ 
+       int powr_sample_time_us;        /* average power sample time */
+       u8 poll_cmd_data;               /* to perform OCC poll command */
+-      int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len);
++      int (*send_cmd)(struct occ *occ, u8 *cmd, size_t len, void *resp,
++                      size_t resp_len);
+ 
+       unsigned long next_update;
+       struct mutex lock;              /* lock OCC access */
+diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
+index 9e61e1fb5142c..c35c07964d856 100644
+--- a/drivers/hwmon/occ/p8_i2c.c
++++ b/drivers/hwmon/occ/p8_i2c.c
+@@ -111,7 +111,8 @@ static int p8_i2c_occ_putscom_be(struct i2c_client *client, u32 address,
+                                     be32_to_cpu(data1));
+ }
+ 
+-static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
++static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
++                             void *resp, size_t resp_len)
+ {
+       int i, rc;
+       unsigned long start;
+@@ -120,7 +121,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+       const long wait_time = msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS);
+       struct p8_i2c_occ *ctx = to_p8_i2c_occ(occ);
+       struct i2c_client *client = ctx->client;
+-      struct occ_response *resp = &occ->resp;
++      struct occ_response *or = (struct occ_response *)resp;
+ 
+       start = jiffies;
+ 
+@@ -151,7 +152,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+                       return rc;
+ 
+               /* wait for OCC */
+-              if (resp->return_status == OCC_RESP_CMD_IN_PRG) {
++              if (or->return_status == OCC_RESP_CMD_IN_PRG) {
+                       rc = -EALREADY;
+ 
+                       if (time_after(jiffies, start + timeout))
+@@ -163,7 +164,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+       } while (rc);
+ 
+       /* check the OCC response */
+-      switch (resp->return_status) {
++      switch (or->return_status) {
+       case OCC_RESP_CMD_IN_PRG:
+               rc = -ETIMEDOUT;
+               break;
+@@ -192,8 +193,8 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+       if (rc < 0)
+               return rc;
+ 
+-      data_length = get_unaligned_be16(&resp->data_length);
+-      if (data_length > OCC_RESP_DATA_BYTES)
++      data_length = get_unaligned_be16(&or->data_length);
++      if ((data_length + 7) > resp_len)
+               return -EMSGSIZE;
+ 
+       /* fetch the rest of the response data */
+diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
+index 49b13cc01073a..bad349bf9f339 100644
+--- a/drivers/hwmon/occ/p9_sbe.c
++++ b/drivers/hwmon/occ/p9_sbe.c
+@@ -78,11 +78,10 @@ done:
+       return notify;
+ }
+ 
+-static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
++static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
++                             void *resp, size_t resp_len)
+ {
+-      struct occ_response *resp = &occ->resp;
+       struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
+-      size_t resp_len = sizeof(*resp);
+       int rc;
+ 
+       rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
+@@ -96,7 +95,7 @@ static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len)
+               return rc;
+       }
+ 
+-      switch (resp->return_status) {
++      switch (((struct occ_response *)resp)->return_status) {
+       case OCC_RESP_CMD_IN_PRG:
+               rc = -ETIMEDOUT;
+               break;
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 1c107d6d03b99..b985e0d9bc05e 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1252,8 +1252,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
+               return ERR_CAST(cm_id_priv);
+ 
+       err = cm_init_listen(cm_id_priv, service_id, 0);
+-      if (err)
++      if (err) {
++              ib_destroy_cm_id(&cm_id_priv->id);
+               return ERR_PTR(err);
++      }
+ 
+       spin_lock_irq(&cm_id_priv->lock);
+       listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
+diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
+index 8def88cfa3009..db9ef3e1eb97c 100644
+--- a/drivers/infiniband/hw/qedr/qedr.h
++++ b/drivers/infiniband/hw/qedr/qedr.h
+@@ -418,6 +418,7 @@ struct qedr_qp {
+       u32 sq_psn;
+       u32 qkey;
+       u32 dest_qp_num;
++      u8 timeout;
+ 
+       /* Relevant to qps created from kernel space only (ULPs) */
+       u8 prev_wqe_size;
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index a53476653b0d9..df4d7970c1ad5 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -2612,6 +2612,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+                                       1 << max_t(int, attr->timeout - 8, 0);
+               else
+                       qp_params.ack_timeout = 0;
++
++              qp->timeout = attr->timeout;
+       }
+ 
+       if (attr_mask & IB_QP_RETRY_CNT) {
+@@ -2771,7 +2773,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
+       rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
+       rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+       rdma_ah_set_sl(&qp_attr->ah_attr, 0);
+-      qp_attr->timeout = params.timeout;
++      qp_attr->timeout = qp->timeout;
+       qp_attr->rnr_retry = params.rnr_retry;
+       qp_attr->retry_cnt = params.retry_cnt;
+       qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 2b26435a6946e..e362a7471512d 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+ static int validate_raid_redundancy(struct raid_set *rs)
+ {
+       unsigned int i, rebuild_cnt = 0;
+-      unsigned int rebuilds_per_group = 0, copies;
++      unsigned int rebuilds_per_group = 0, copies, raid_disks;
+       unsigned int group_size, last_group_start;
+ 
+-      for (i = 0; i < rs->md.raid_disks; i++)
+-              if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+-                  !rs->dev[i].rdev.sb_page)
++      for (i = 0; i < rs->raid_disks; i++)
++              if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
++                  ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
++                    !rs->dev[i].rdev.sb_page)))
+                       rebuild_cnt++;
+ 
+       switch (rs->md.level) {
+@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
+                *          A    A    B    B    C
+                *          C    D    D    E    E
+                */
++              raid_disks = min(rs->raid_disks, rs->md.raid_disks);
+               if (__is_raid10_near(rs->md.new_layout)) {
+-                      for (i = 0; i < rs->md.raid_disks; i++) {
++                      for (i = 0; i < raid_disks; i++) {
+                               if (!(i % copies))
+                                       rebuilds_per_group = 0;
+                               if ((!rs->dev[i].rdev.sb_page ||
+@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
+                * results in the need to treat the last (potentially larger)
+                * set differently.
+                */
+-              group_size = (rs->md.raid_disks / copies);
+-              last_group_start = (rs->md.raid_disks / group_size) - 1;
++              group_size = (raid_disks / copies);
++              last_group_start = (raid_disks / group_size) - 1;
+               last_group_start *= group_size;
+-              for (i = 0; i < rs->md.raid_disks; i++) {
++              for (i = 0; i < raid_disks; i++) {
+                       if (!(i % copies) && !(i > last_group_start))
+                               rebuilds_per_group = 0;
+                       if ((!rs->dev[i].rdev.sb_page ||
+@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
+ {
+       int i;
+ 
+-      for (i = 0; i < rs->md.raid_disks; i++) {
++      for (i = 0; i < rs->raid_disks; i++) {
+               struct md_rdev *rdev = &rs->dev[i].rdev;
+ 
+               if (!test_bit(Journal, &rdev->flags) &&
+@@ -3771,13 +3773,13 @@ static int raid_iterate_devices(struct dm_target *ti,
+       unsigned int i;
+       int r = 0;
+ 
+-      for (i = 0; !r && i < rs->md.raid_disks; i++)
+-              if (rs->dev[i].data_dev)
+-                      r = fn(ti,
+-                               rs->dev[i].data_dev,
+-                               0, /* No offset on data devs */
+-                               rs->md.dev_sectors,
+-                               data);
++      for (i = 0; !r && i < rs->raid_disks; i++) {
++              if (rs->dev[i].data_dev) {
++                      r = fn(ti, rs->dev[i].data_dev,
++                             0, /* No offset on data devs */
++                             rs->md.dev_sectors, data);
++              }
++      }
+ 
+       return r;
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index d6ce5a09fd358..0dd4679deb612 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -8023,6 +8023,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
+        */
+       if (rdev->saved_raid_disk >= 0 &&
+           rdev->saved_raid_disk >= first &&
++          rdev->saved_raid_disk <= last &&
+           conf->disks[rdev->saved_raid_disk].rdev == NULL)
+               first = rdev->saved_raid_disk;
+ 
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index a86b1f71762ea..d7fb33c078e81 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2228,7 +2228,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
+                               temp_aggregator->num_of_ports--;
+                               if (__agg_active_ports(temp_aggregator) == 0) {
+                                      select_new_active_agg = temp_aggregator->is_active;
+-                                      ad_clear_agg(temp_aggregator);
++                                      if (temp_aggregator->num_of_ports == 0)
++                                              ad_clear_agg(temp_aggregator);
+                                       if (select_new_active_agg) {
+                                              slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
+                                              /* select new active aggregator */
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 303c8d32d451e..007d43e46dcb0 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1302,12 +1302,12 @@ static int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
+               return res;
+ 
+       if (rlb_enabled) {
+-              bond->alb_info.rlb_enabled = 1;
+               res = rlb_initialize(bond);
+               if (res) {
+                       tlb_deinitialize(bond);
+                       return res;
+               }
++              bond->alb_info.rlb_enabled = 1;
+       } else {
+               bond->alb_info.rlb_enabled = 0;
+       }
+diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
+index 444ef6a342f69..14c5b2db65f41 100644
+--- a/drivers/net/caif/caif_virtio.c
++++ b/drivers/net/caif/caif_virtio.c
+@@ -721,13 +721,21 @@ static int cfv_probe(struct virtio_device *vdev)
+       /* Carrier is off until netdevice is opened */
+       netif_carrier_off(netdev);
+ 
++      /* serialize netdev register + virtio_device_ready() with ndo_open() */
++      rtnl_lock();
++
+       /* register Netdev */
+-      err = register_netdev(netdev);
++      err = register_netdevice(netdev);
+       if (err) {
++              rtnl_unlock();
+               dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
+               goto err;
+       }
+ 
++      virtio_device_ready(vdev);
++
++      rtnl_unlock();
++
+       debugfs_init(cfv);
+ 
+       return 0;
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 87e81c636339f..be0edfa093d04 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -878,6 +878,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+               if (duplex == DUPLEX_FULL)
+                       reg |= DUPLX_MODE;
+ 
++              if (tx_pause)
++                      reg |= TXFLOW_CNTL;
++              if (rx_pause)
++                      reg |= RXFLOW_CNTL;
++
+               core_writel(priv, reg, offset);
+       }
+ 
+diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+index 2572c6087bb5a..b28baab6d56a1 100644
+--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
++++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+@@ -300,6 +300,7 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
+       const char *label, *state;
+       int ret = -EINVAL;
+ 
++      of_node_get(hellcreek->dev->of_node);
+       leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
+       if (!leds) {
+               dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 52a8566071edd..4a071f96ea283 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1883,6 +1883,8 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
+                                     struct felix_stream_filter_counters *counters)
+ {
++      mutex_lock(&ocelot->stats_lock);
++
+       ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
+                  SYS_STAT_CFG_STAT_VIEW_M,
+                  SYS_STAT_CFG);
+@@ -1897,6 +1899,8 @@ static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
+                    SYS_STAT_CFG_STAT_VIEW(index) |
+                    SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+                    SYS_STAT_CFG);
++
++      mutex_unlock(&ocelot->stats_lock);
+ }
+ 
+ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 79deb19e3a194..7ad663c5b1ab7 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -4418,6 +4418,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
+       return 0;
+ 
+ err_nexthop_neigh_init:
++      list_del(&nh->router_list_node);
++      mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+       mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+       return err;
+ }
+@@ -6743,6 +6745,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
+                                 const struct fib6_info *rt)
+ {
+       struct net_device *dev = rt->fib6_nh->fib_nh_dev;
++      int err;
+ 
+       nh->nhgi = nh_grp->nhgi;
+       nh->nh_weight = rt->fib6_nh->fib_nh_weight;
+@@ -6758,7 +6761,16 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
+               return 0;
+       nh->ifindex = dev->ifindex;
+ 
+-      return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
++      err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
++      if (err)
++              goto err_nexthop_type_init;
++
++      return 0;
++
++err_nexthop_type_init:
++      list_del(&nh->router_list_node);
++      mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
++      return err;
+ }
+ 
+ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+index 5389fffc694ab..5edc8b7176c82 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+@@ -396,6 +396,14 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
+       u32 mact_entry;
+       int res, err;
+ 
++      if (!sparx5_netdevice_check(dev))
++              return -EOPNOTSUPP;
++
++      if (netif_is_bridge_master(v->obj.orig_dev)) {
++              sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
++              return 0;
++      }
++
+       /* When VLAN unaware the vlan value is not parsed and we receive vid 0.
+        * Fall back to bridge vid 1.
+        */
+@@ -461,6 +469,14 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
+       u32 mact_entry, res, pgid_entry[3];
+       int err;
+ 
++      if (!sparx5_netdevice_check(dev))
++              return -EOPNOTSUPP;
++
++      if (netif_is_bridge_master(v->obj.orig_dev)) {
++              sparx5_mact_forget(spx5, v->addr, v->vid);
++              return 0;
++      }
++
+       if (!br_vlan_enabled(spx5->hw_bridge_dev))
+               vid = 1;
+       else
+@@ -500,6 +516,7 @@ static int sparx5_handle_port_obj_add(struct net_device *dev,
+                                                 SWITCHDEV_OBJ_PORT_VLAN(obj));
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
++      case SWITCHDEV_OBJ_ID_HOST_MDB:
+               err = sparx5_handle_port_mdb_add(dev, nb,
+                                                SWITCHDEV_OBJ_PORT_MDB(obj));
+               break;
+@@ -552,6 +569,7 @@ static int sparx5_handle_port_obj_del(struct net_device *dev,
+                                                 SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
++      case SWITCHDEV_OBJ_ID_HOST_MDB:
+               err = sparx5_handle_port_mdb_del(dev, nb,
+                                                SWITCHDEV_OBJ_PORT_MDB(obj));
+               break;
+diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
+index a0654e88444cf..0329caf63279c 100644
+--- a/drivers/net/ethernet/smsc/epic100.c
++++ b/drivers/net/ethernet/smsc/epic100.c
+@@ -1515,14 +1515,14 @@ static void epic_remove_one(struct pci_dev *pdev)
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct epic_private *ep = netdev_priv(dev);
+ 
++      unregister_netdev(dev);
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+                         ep->tx_ring_dma);
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+                         ep->rx_ring_dma);
+-      unregister_netdev(dev);
+       pci_iounmap(pdev, ep->ioaddr);
+-      pci_release_regions(pdev);
+       free_netdev(dev);
++      pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       /* pci_power_off(pdev, -1); */
+ }
+diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
+index 4578963375055..0f1e617a26c91 100644
+--- a/drivers/net/phy/ax88796b.c
++++ b/drivers/net/phy/ax88796b.c
+@@ -88,8 +88,10 @@ static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
+       /* Reset PHY, otherwise MII_LPA will provide outdated information.
+        * This issue is reproducible only with some link partner PHYs
+        */
+-      if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
+-              phydev->drv->soft_reset(phydev);
++      if (phydev->state == PHY_NOLINK) {
++              phy_init_hw(phydev);
++              phy_start_aneg(phydev);
++      }
+ }
+ 
+ static struct phy_driver asix_driver[] = {
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index ce17b2af3218f..a792dd6d2ec33 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -228,9 +228,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
+               if (misr_status < 0)
+                       return misr_status;
+ 
+-              misr_status |= (DP83822_RX_ERR_HF_INT_EN |
+-                              DP83822_FALSE_CARRIER_HF_INT_EN |
+-                              DP83822_LINK_STAT_INT_EN |
++              misr_status |= (DP83822_LINK_STAT_INT_EN |
+                               DP83822_ENERGY_DET_INT_EN |
+                               DP83822_LINK_QUAL_INT_EN);
+ 
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index f122026c46826..2fc851082e7b4 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -31,6 +31,7 @@
+ #include <linux/io.h>
+ #include <linux/uaccess.h>
+ #include <linux/atomic.h>
++#include <linux/suspend.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+@@ -972,6 +973,28 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
+       struct phy_driver *drv = phydev->drv;
+       irqreturn_t ret;
+ 
++      /* Wakeup interrupts may occur during a system sleep transition.
++       * Postpone handling until the PHY has resumed.
++       */
++      if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) {
++              struct net_device *netdev = phydev->attached_dev;
++
++              if (netdev) {
++                      struct device *parent = netdev->dev.parent;
++
++                      if (netdev->wol_enabled)
++                              pm_system_wakeup();
++                      else if (device_may_wakeup(&netdev->dev))
++                              pm_wakeup_dev_event(&netdev->dev, 0, true);
++                      else if (parent && device_may_wakeup(parent))
++                              pm_wakeup_dev_event(parent, 0, true);
++              }
++
++              phydev->irq_rerun = 1;
++              disable_irq_nosync(irq);
++              return IRQ_HANDLED;
++      }
++
+       mutex_lock(&phydev->lock);
+       ret = drv->handle_interrupt(phydev);
+       mutex_unlock(&phydev->lock);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 8406ac739def8..2a53ae38a962b 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -277,6 +277,15 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
+       if (phydev->mac_managed_pm)
+               return 0;
+ 
++      /* Wakeup interrupts may occur during the system sleep transition when
++       * the PHY is inaccessible. Set flag to postpone handling until the PHY
++       * has resumed. Wait for concurrent interrupt handler to complete.
++       */
++      if (phy_interrupt_is_valid(phydev)) {
++              phydev->irq_suspended = 1;
++              synchronize_irq(phydev->irq);
++      }
++
+       /* We must stop the state machine manually, otherwise it stops out of
+        * control, possibly with the phydev->lock held. Upon resume, netdev
+        * may call phy routines that try to grab the same lock, and that may
+@@ -314,6 +323,20 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
+       if (ret < 0)
+               return ret;
+ no_resume:
++      if (phy_interrupt_is_valid(phydev)) {
++              phydev->irq_suspended = 0;
++              synchronize_irq(phydev->irq);
++
++              /* Rerun interrupts which were postponed by phy_interrupt()
++               * because they occurred during the system sleep transition.
++               */
++              if (phydev->irq_rerun) {
++                      phydev->irq_rerun = 0;
++                      enable_irq(phydev->irq);
++                      irq_wake_thread(phydev->irq, phydev);
++              }
++      }
++
+       if (phydev->attached_dev && phydev->adjust_link)
+               phy_start_machine(phydev);
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index dbe4c0a4be2cd..4ebf83bb9888b 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -274,6 +274,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
+       }
+ }
+ 
++static void tun_napi_enable(struct tun_file *tfile)
++{
++      if (tfile->napi_enabled)
++              napi_enable(&tfile->napi);
++}
++
+ static void tun_napi_disable(struct tun_file *tfile)
+ {
+       if (tfile->napi_enabled)
+@@ -635,7 +641,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+       tun = rtnl_dereference(tfile->tun);
+ 
+       if (tun && clean) {
+-              tun_napi_disable(tfile);
++              if (!tfile->detached)
++                      tun_napi_disable(tfile);
+               tun_napi_del(tfile);
+       }
+ 
+@@ -654,8 +661,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+               if (clean) {
+                       RCU_INIT_POINTER(tfile->tun, NULL);
+                       sock_put(&tfile->sk);
+-              } else
++              } else {
+                       tun_disable_queue(tun, tfile);
++                      tun_napi_disable(tfile);
++              }
+ 
+               synchronize_net();
+               tun_flow_delete_by_queue(tun, tun->numqueues + 1);
+@@ -728,6 +737,7 @@ static void tun_detach_all(struct net_device *dev)
+               sock_put(&tfile->sk);
+       }
+       list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
++              tun_napi_del(tfile);
+               tun_enable_queue(tfile);
+               tun_queue_purge(tfile);
+               xdp_rxq_info_unreg(&tfile->xdp_rxq);
+@@ -808,6 +818,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
+ 
+       if (tfile->detached) {
+               tun_enable_queue(tfile);
++              tun_napi_enable(tfile);
+       } else {
+               sock_hold(&tfile->sk);
+               tun_napi_init(tun, tfile, napi, napi_frags);
+diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
+index 2c81236c6c7c6..45d3cc5cc355e 100644
+--- a/drivers/net/usb/asix.h
++++ b/drivers/net/usb/asix.h
+@@ -126,8 +126,7 @@
+        AX_MEDIUM_RE)
+ 
+ #define AX88772_MEDIUM_DEFAULT        \
+-      (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
+-       AX_MEDIUM_TFC | AX_MEDIUM_PS | \
++      (AX_MEDIUM_FD | AX_MEDIUM_PS | \
+        AX_MEDIUM_AC | AX_MEDIUM_RE)
+ 
+ /* AX88772 & AX88178 RX_CTL values */
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
+index 632fa6c1d5e30..b4a1b7abcfc97 100644
+--- a/drivers/net/usb/asix_common.c
++++ b/drivers/net/usb/asix_common.c
+@@ -431,6 +431,7 @@ void asix_adjust_link(struct net_device *netdev)
+ 
+       asix_write_medium_mode(dev, mode, 0);
+       phy_print_status(phydev);
++      usbnet_link_change(dev, phydev->link, 0);
+ }
+ 
+ int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index e2fa56b926853..873f6deabbd1a 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1472,6 +1472,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+        * are bundled into this buffer and where we can find an array of
+        * per-packet metadata (which contains elements encoded into u16).
+        */
++
++      /* SKB contents for current firmware:
++       *   <packet 1> <padding>
++       *   ...
++       *   <packet N> <padding>
++       *   <per-packet metadata entry 1> <dummy header>
++       *   ...
++       *   <per-packet metadata entry N> <dummy header>
++       *   <padding2> <rx_hdr>
++       *
++       * where:
++       *   <packet N> contains pkt_len bytes:
++       *              2 bytes of IP alignment pseudo header
++       *              packet received
++       *   <per-packet metadata entry N> contains 4 bytes:
++       *              pkt_len and fields AX_RXHDR_*
++       *   <padding>  0-7 bytes to terminate at
++       *              8 bytes boundary (64-bit).
++       *   <padding2> 4 bytes to make rx_hdr terminate at
++       *              8 bytes boundary (64-bit)
++       *   <dummy-header> contains 4 bytes:
++       *              pkt_len=0 and AX_RXHDR_DROP_ERR
++       *   <rx-hdr>   contains 4 bytes:
++       *              pkt_cnt and hdr_off (offset of
++       *                <per-packet metadata entry 1>)
++       *
++       * pkt_cnt is the number of entries in the per-packet metadata.
++       * In current firmware there are 2 entries per packet.
++       * The first points to the packet and the
++       *  second is a dummy header.
++       * This was done probably to align fields in 64-bit and
++       *  maintain compatibility with old firmware.
++       * This code assumes that <dummy header> and <padding2> are
++       *  optional.
++       */
++
+       if (skb->len < 4)
+               return 0;
+       skb_trim(skb, skb->len - 4);
+@@ -1485,51 +1521,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+       /* Make sure that the bounds of the metadata array are inside the SKB
+        * (and in front of the counter at the end).
+        */
+-      if (pkt_cnt * 2 + hdr_off > skb->len)
++      if (pkt_cnt * 4 + hdr_off > skb->len)
+               return 0;
+       pkt_hdr = (u32 *)(skb->data + hdr_off);
+ 
+       /* Packets must not overlap the metadata array */
+       skb_trim(skb, hdr_off);
+ 
+-      for (; ; pkt_cnt--, pkt_hdr++) {
++      for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) {
++              u16 pkt_len_plus_padd;
+               u16 pkt_len;
+ 
+               le32_to_cpus(pkt_hdr);
+               pkt_len = (*pkt_hdr >> 16) & 0x1fff;
++              pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
+ 
+-              if (pkt_len > skb->len)
++              /* Skip dummy header used for alignment
++               */
++              if (pkt_len == 0)
++                      continue;
++
++              if (pkt_len_plus_padd > skb->len)
+                       return 0;
+ 
+               /* Check CRC or runt packet */
+-              if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
+-                  pkt_len >= 2 + ETH_HLEN) {
+-                      bool last = (pkt_cnt == 0);
+-
+-                      if (last) {
+-                              ax_skb = skb;
+-                      } else {
+-                              ax_skb = skb_clone(skb, GFP_ATOMIC);
+-                              if (!ax_skb)
+-                                      return 0;
+-                      }
+-                      ax_skb->len = pkt_len;
+-                      /* Skip IP alignment pseudo header */
+-                      skb_pull(ax_skb, 2);
+-                      skb_set_tail_pointer(ax_skb, ax_skb->len);
+-                      ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
+-                      ax88179_rx_checksum(ax_skb, pkt_hdr);
++              if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) ||
++                  pkt_len < 2 + ETH_HLEN) {
++                      dev->net->stats.rx_errors++;
++                      skb_pull(skb, pkt_len_plus_padd);
++                      continue;
++              }
+ 
+-                      if (last)
+-                              return 1;
++              /* last packet */
++              if (pkt_len_plus_padd == skb->len) {
++                      skb_trim(skb, pkt_len);
+ 
+-                      usbnet_skb_return(dev, ax_skb);
++                      /* Skip IP alignment pseudo header */
++                      skb_pull(skb, 2);
++
++                      skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
++                      ax88179_rx_checksum(skb, pkt_hdr);
++                      return 1;
+               }
+ 
+-              /* Trim this packet away from the SKB */
+-              if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
++              ax_skb = skb_clone(skb, GFP_ATOMIC);
++              if (!ax_skb)
+                       return 0;
++              skb_trim(ax_skb, pkt_len);
++
++              /* Skip IP alignment pseudo header */
++              skb_pull(ax_skb, 2);
++
++              skb->truesize = pkt_len_plus_padd +
++                              SKB_DATA_ALIGN(sizeof(struct sk_buff));
++              ax88179_rx_checksum(ax_skb, pkt_hdr);
++              usbnet_skb_return(dev, ax_skb);
++
++              skb_pull(skb, pkt_len_plus_padd);
+       }
++
++      return 0;
+ }
+ 
+ static struct sk_buff *
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 36b24ec116504..2ea81931543c1 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -2004,7 +2004,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+                  cmd, reqtype, value, index, size);
+ 
+       if (size) {
+-              buf = kmalloc(size, GFP_KERNEL);
++              buf = kmalloc(size, GFP_NOIO);
+               if (!buf)
+                       goto out;
+       }
+@@ -2036,7 +2036,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+                  cmd, reqtype, value, index, size);
+ 
+       if (data) {
+-              buf = kmemdup(data, size, GFP_KERNEL);
++              buf = kmemdup(data, size, GFP_NOIO);
+               if (!buf)
+                       goto out;
+       } else {
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 10d548b07b9c6..c7804fce204cc 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3641,14 +3641,20 @@ static int virtnet_probe(struct virtio_device *vdev)
+       if (vi->has_rss || vi->has_rss_hash_report)
+               virtnet_init_default_rss(vi);
+ 
+-      err = register_netdev(dev);
++      /* serialize netdev register + virtio_device_ready() with ndo_open() */
++      rtnl_lock();
++
++      err = register_netdevice(dev);
+       if (err) {
+               pr_debug("virtio_net: registering device failed\n");
++              rtnl_unlock();
+               goto free_failover;
+       }
+ 
+       virtio_device_ready(vdev);
+ 
++      rtnl_unlock();
++
+       err = virtnet_cpu_notif_add(vi);
+       if (err) {
+               pr_debug("virtio_net: registering cpu notifier failed\n");
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index e2b4a1893a132..97c7633070a3c 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
+ MODULE_PARM_DESC(max_queues,
+                "Maximum number of queues per virtual interface");
+ 
++static bool __read_mostly xennet_trusted = true;
++module_param_named(trusted, xennet_trusted, bool, 0644);
++MODULE_PARM_DESC(trusted, "Is the backend trusted");
++
+ #define XENNET_TIMEOUT  (5 * HZ)
+ 
+ static const struct ethtool_ops xennet_ethtool_ops;
+@@ -175,6 +179,9 @@ struct netfront_info {
+       /* Is device behaving sane? */
+       bool broken;
+ 
++      /* Should skbs be bounced into a zeroed buffer? */
++      bool bounce;
++
+       atomic_t rx_gso_checksum_fixup;
+ };
+ 
+@@ -273,7 +280,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
+       if (unlikely(!skb))
+               return NULL;
+ 
+-      page = page_pool_dev_alloc_pages(queue->page_pool);
++      page = page_pool_alloc_pages(queue->page_pool,
++                                   GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
+       if (unlikely(!page)) {
+               kfree_skb(skb);
+               return NULL;
+@@ -667,6 +675,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
+       return nxmit;
+ }
+ 
++struct sk_buff *bounce_skb(const struct sk_buff *skb)
++{
++      unsigned int headerlen = skb_headroom(skb);
++      /* Align size to allocate full pages and avoid contiguous data leaks */
++      unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
++                                XEN_PAGE_SIZE);
++      struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
++
++      if (!n)
++              return NULL;
++
++      if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
++              WARN_ONCE(1, "misaligned skb allocated\n");
++              kfree_skb(n);
++              return NULL;
++      }
++
++      /* Set the data pointer */
++      skb_reserve(n, headerlen);
++      /* Set the tail pointer and length */
++      skb_put(n, skb->len);
++
++      BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
++
++      skb_copy_header(n, skb);
++      return n;
++}
+ 
+ #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
+ 
+@@ -720,9 +755,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+ 
+       /* The first req should be at least ETH_HLEN size or the packet will be
+        * dropped by netback.
++       *
++       * If the backend is not trusted bounce all data to zeroed pages to
++       * avoid exposing contiguous data on the granted page not belonging to
++       * the skb.
+        */
+-      if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+-              nskb = skb_copy(skb, GFP_ATOMIC);
++      if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
++              nskb = bounce_skb(skb);
+               if (!nskb)
+                       goto drop;
+               dev_consume_skb_any(skb);
+@@ -1055,8 +1094,10 @@ static int xennet_get_responses(struct netfront_queue *queue,
+                       }
+               }
+               rcu_read_unlock();
+-next:
++
+               __skb_queue_tail(list, skb);
++
++next:
+               if (!(rx->flags & XEN_NETRXF_more_data))
+                       break;
+ 
+@@ -2246,6 +2287,10 @@ static int talk_to_netback(struct xenbus_device *dev,
+ 
+       info->netdev->irq = 0;
+ 
++      /* Check if backend is trusted. */
++      info->bounce = !xennet_trusted ||
++                     !xenbus_read_unsigned(dev->nodename, "trusted", 1);
++
+       /* Check if backend supports multiple queues */
+       max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+                                         "multi-queue-max-queues", 1);
+@@ -2413,6 +2458,9 @@ static int xennet_connect(struct net_device *dev)
+               return err;
+       if (np->netback_has_xdp_headroom)
+               pr_info("backend supports XDP headroom\n");
++      if (np->bounce)
++              dev_info(&np->xbdev->dev,
++                       "bouncing transmitted data to zeroed pages\n");
+ 
+       /* talk_to_netback() sets the correct number of queues */
+       num_queues = dev->real_num_tx_queues;
+diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
+index ceef81d93ac99..01329b91d59d5 100644
+--- a/drivers/nfc/nfcmrvl/i2c.c
++++ b/drivers/nfc/nfcmrvl/i2c.c
+@@ -167,9 +167,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node,
+               pdata->irq_polarity = IRQF_TRIGGER_RISING;
+ 
+       ret = irq_of_parse_and_map(node, 0);
+-      if (ret < 0) {
+-              pr_err("Unable to get irq, error: %d\n", ret);
+-              return ret;
++      if (!ret) {
++              pr_err("Unable to get irq\n");
++              return -EINVAL;
+       }
+       pdata->irq = ret;
+ 
+diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
+index a38e2fcdfd39f..ad3359a4942c7 100644
+--- a/drivers/nfc/nfcmrvl/spi.c
++++ b/drivers/nfc/nfcmrvl/spi.c
+@@ -115,9 +115,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node,
+       }
+ 
+       ret = irq_of_parse_and_map(node, 0);
+-      if (ret < 0) {
+-              pr_err("Unable to get irq, error: %d\n", ret);
+-              return ret;
++      if (!ret) {
++              pr_err("Unable to get irq\n");
++              return -EINVAL;
+       }
+       pdata->irq = ret;
+ 
+diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
+index 7e451c10985df..e8f3b35afbee4 100644
+--- a/drivers/nfc/nxp-nci/i2c.c
++++ b/drivers/nfc/nxp-nci/i2c.c
+@@ -162,6 +162,9 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
+ 
+       skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
+ 
++      if (!header.plen)
++              return 0;
++
+       r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen);
+       if (r != header.plen) {
+               nfc_err(&client->dev,
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 7b0d1443217a3..5db16857b80e3 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -182,8 +182,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
+       ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
+ 
+       /* make sure we are in the region */
+-      if (ctx->phys < nd_region->ndr_start
+-                     || (ctx->phys + ctx->cleared) > ndr_end)
++      if (ctx->phys < nd_region->ndr_start ||
++          (ctx->phys + ctx->cleared - 1) > ndr_end)
+               return 0;
+ 
+       sector = (ctx->phys - nd_region->ndr_start) / 512;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ddea0fb90c288..fe829377c7c2a 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3436,8 +3436,11 @@ static const struct pci_device_id nvme_id_table[] = {
+       { PCI_DEVICE(0x1b4b, 0x1092),   /* Lexar 256 GB SSD */
+               .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++      { PCI_DEVICE(0x1cc1, 0x33f8),   /* ADATA IM2P33F8ABR1 1 TB */
++              .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
+-              .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++              .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
++                              NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index e44b2988759ec..ff77c3d2354fa 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
+ }
+ CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
+ 
++static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
++              char *page)
++{
++      return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
++}
++
++static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
++              const char *page, size_t count)
++{
++      struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
++      unsigned int clear_ids;
++
++      if (kstrtouint(page, 0, &clear_ids))
++              return -EINVAL;
++      subsys->clear_ids = clear_ids;
++      return count;
++}
++CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
++
+ static struct configfs_attribute *nvmet_passthru_attrs[] = {
+       &nvmet_passthru_attr_device_path,
+       &nvmet_passthru_attr_enable,
+       &nvmet_passthru_attr_admin_timeout,
+       &nvmet_passthru_attr_io_timeout,
++      &nvmet_passthru_attr_clear_ids,
+       NULL,
+ };
+ 
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 90e75324dae05..c27660a660d9a 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+       ctrl->port = req->port;
+       ctrl->ops = req->ops;
+ 
++#ifdef CONFIG_NVME_TARGET_PASSTHRU
++      /* By default, set loop targets to clear IDs */
++      if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
++              subsys->clear_ids = 1;
++#endif
++
+       INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
+       INIT_LIST_HEAD(&ctrl->async_events);
+       INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 69818752a33a5..2b3e5719f24e4 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -249,6 +249,7 @@ struct nvmet_subsys {
+       struct config_group     passthru_group;
+       unsigned int            admin_timeout;
+       unsigned int            io_timeout;
++      unsigned int            clear_ids;
+ #endif /* CONFIG_NVME_TARGET_PASSTHRU */
+ 
+ #ifdef CONFIG_BLK_DEV_ZONED
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index 5247c24538eba..6506831cb0121 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -30,6 +30,53 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+               ctrl->cap &= ~(1ULL << 43);
+ }
+ 
++static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
++{
++      struct nvmet_ctrl *ctrl = req->sq->ctrl;
++      u16 status = NVME_SC_SUCCESS;
++      int pos, len;
++      bool csi_seen = false;
++      void *data;
++      u8 csi;
++
++      if (!ctrl->subsys->clear_ids)
++              return status;
++
++      data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
++      if (!data)
++              return NVME_SC_INTERNAL;
++
++      status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
++      if (status)
++              goto out_free;
++
++      for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
++              struct nvme_ns_id_desc *cur = data + pos;
++
++              if (cur->nidl == 0)
++                      break;
++              if (cur->nidt == NVME_NIDT_CSI) {
++                      memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
++                      csi_seen = true;
++                      break;
++              }
++              len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
++      }
++
++      memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
++      if (csi_seen) {
++              struct nvme_ns_id_desc *cur = data;
++
++              cur->nidt = NVME_NIDT_CSI;
++              cur->nidl = NVME_NIDT_CSI_LEN;
++              memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
++      }
++      status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
++out_free:
++      kfree(data);
++      return status;
++}
++
+ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+ {
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+@@ -152,6 +199,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
+        */
+       id->mc = 0;
+ 
++      if (req->sq->ctrl->subsys->clear_ids) {
++              memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
++              memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
++      }
++
+       status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ 
+ out_free:
+@@ -176,6 +228,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+               case NVME_ID_CNS_NS:
+                       nvmet_passthru_override_id_ns(req);
+                       break;
++              case NVME_ID_CNS_NS_DESC_LIST:
++                      nvmet_passthru_override_id_descs(req);
++                      break;
+               }
+       } else if (status < 0)
+               status = NVME_SC_INTERNAL;
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 2793554e622ee..0a9542599ad1c 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -405,7 +405,7 @@ err:
+       return NVME_SC_INTERNAL;
+ }
+ 
+-static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
++static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
+               struct nvmet_tcp_cmd *cmd)
+ {
+       ahash_request_set_crypt(hash, cmd->req.sg,
+@@ -413,23 +413,6 @@ static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+       crypto_ahash_digest(hash);
+ }
+ 
+-static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+-              struct nvmet_tcp_cmd *cmd)
+-{
+-      struct scatterlist sg;
+-      struct kvec *iov;
+-      int i;
+-
+-      crypto_ahash_init(hash);
+-      for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+-              sg_init_one(&sg, iov->iov_base, iov->iov_len);
+-              ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+-              crypto_ahash_update(hash);
+-      }
+-      ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+-      crypto_ahash_final(hash);
+-}
+-
+ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+ {
+       struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
+@@ -454,7 +437,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+ 
+       if (queue->data_digest) {
+               pdu->hdr.flags |= NVME_TCP_F_DDGST;
+-              nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
++              nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
+       }
+ 
+       if (cmd->queue->hdr_digest) {
+@@ -1137,7 +1120,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
+ {
+       struct nvmet_tcp_queue *queue = cmd->queue;
+ 
+-      nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
++      nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
+       queue->offset = 0;
+       queue->left = NVME_TCP_DIGEST_LENGTH;
+       queue->rcv_state = NVMET_TCP_RECV_DDGST;
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 5d9dd70e4e0f5..ddb8f14247c01 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -945,6 +945,8 @@ config PANASONIC_LAPTOP
+       tristate "Panasonic Laptop Extras"
+       depends on INPUT && ACPI
+       depends on BACKLIGHT_CLASS_DEVICE
++      depends on ACPI_VIDEO=n || ACPI_VIDEO
++      depends on SERIO_I8042 || SERIO_I8042 = n
+       select INPUT_SPARSEKMAP
+       help
+         This driver adds support for access to backlight control and hotkeys
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 3ccb7b71dfb12..abd0c81d62c40 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -152,6 +152,10 @@ static bool no_bt_rfkill;
+ module_param(no_bt_rfkill, bool, 0444);
+ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
+ 
++static bool allow_v4_dytc;
++module_param(allow_v4_dytc, bool, 0444);
++MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support.");
++
+ /*
+  * ACPI Helpers
+  */
+@@ -871,12 +875,18 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
+ static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
+       {
+               /* Ideapad 5 Pro 16ACH6 */
+-              .ident = "LENOVO 82L5",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "82L5")
+               }
+       },
++      {
++              /* Ideapad 5 15ITL05 */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad 5 15ITL05")
++              }
++      },
+       {}
+ };
+ 
+@@ -901,13 +911,16 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
+ 
+       dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ 
+-      if (dytc_version < 5) {
+-              if (dytc_version < 4 || !dmi_check_system(ideapad_dytc_v4_allow_table)) {
+-                      dev_info(&priv->platform_device->dev,
+-                               "DYTC_VERSION is less than 4 or is not allowed: %d\n",
+-                               dytc_version);
+-                      return -ENODEV;
+-              }
++      if (dytc_version < 4) {
++              dev_info(&priv->platform_device->dev, "DYTC_VERSION < 4 is not supported\n");
++              return -ENODEV;
++      }
++
++      if (dytc_version < 5 &&
++          !(allow_v4_dytc || dmi_check_system(ideapad_dytc_v4_allow_table))) {
++              dev_info(&priv->platform_device->dev,
++                       "DYTC_VERSION 4 support may not work. Pass ideapad_laptop.allow_v4_dytc=Y on the kernel commandline to enable\n");
++              return -ENODEV;
+       }
+ 
+       priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL);
+diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
+index 37850d07987d8..615e39cbbbf19 100644
+--- a/drivers/platform/x86/panasonic-laptop.c
++++ b/drivers/platform/x86/panasonic-laptop.c
+@@ -119,20 +119,22 @@
+  *            - v0.1  start from toshiba_acpi driver written by John Belmonte
+  */
+ 
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/types.h>
++#include <linux/acpi.h>
+ #include <linux/backlight.h>
+ #include <linux/ctype.h>
+-#include <linux/seq_file.h>
+-#include <linux/uaccess.h>
+-#include <linux/slab.h>
+-#include <linux/acpi.h>
++#include <linux/i8042.h>
++#include <linux/init.h>
+ #include <linux/input.h>
+ #include <linux/input/sparse-keymap.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
+ #include <linux/platform_device.h>
+-
++#include <linux/seq_file.h>
++#include <linux/serio.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/uaccess.h>
++#include <acpi/video.h>
+ 
+ MODULE_AUTHOR("Hiroshi Miura <[email protected]>");
+ MODULE_AUTHOR("David Bronaugh <[email protected]>");
+@@ -241,6 +243,42 @@ struct pcc_acpi {
+       struct platform_device  *platform;
+ };
+ 
++/*
++ * On some Panasonic models the volume up / down / mute keys send duplicate
++ * keypress events over the PS/2 kbd interface, filter these out.
++ */
++static bool panasonic_i8042_filter(unsigned char data, unsigned char str,
++                                 struct serio *port)
++{
++      static bool extended;
++
++      if (str & I8042_STR_AUXDATA)
++              return false;
++
++      if (data == 0xe0) {
++              extended = true;
++              return true;
++      } else if (extended) {
++              extended = false;
++
++              switch (data & 0x7f) {
++              case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */
++              case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */
++              case 0x30: /* e0 30 / e0 b0, Volume Up press / release */
++                      return true;
++              default:
++                      /*
++                       * Report the previously filtered e0 before continuing
++                       * with the next non-filtered byte.
++                       */
++                      serio_interrupt(port, 0xe0, 0);
++                      return false;
++              }
++      }
++
++      return false;
++}
++
+ /* method access functions */
+ static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
+ {
+@@ -762,6 +800,8 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
+       struct input_dev *hotk_input_dev = pcc->input_dev;
+       int rc;
+       unsigned long long result;
++      unsigned int key;
++      unsigned int updown;
+ 
+       rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
+                                  NULL, &result);
+@@ -770,20 +810,27 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
+               return;
+       }
+ 
++      key = result & 0xf;
++      updown = result & 0x80; /* 0x80 == key down; 0x00 = key up */
++
+       /* hack: some firmware sends no key down for sleep / hibernate */
+-      if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) {
+-              if (result & 0x80)
++      if (key == 7 || key == 10) {
++              if (updown)
+                       sleep_keydown_seen = 1;
+               if (!sleep_keydown_seen)
+                       sparse_keymap_report_event(hotk_input_dev,
+-                                      result & 0xf, 0x80, false);
++                                      key, 0x80, false);
+       }
+ 
+-      if ((result & 0xf) == 0x7 || (result & 0xf) == 0x9 || (result & 0xf) == 0xa) {
+-              if (!sparse_keymap_report_event(hotk_input_dev,
+-                                              result & 0xf, result & 0x80, false))
+-                      pr_err("Unknown hotkey event: 0x%04llx\n", result);
+-      }
++      /*
++       * Don't report brightness key-presses if they are also reported
++       * by the ACPI video bus.
++       */
++      if ((key == 1 || key == 2) && acpi_video_handles_brightness_key_presses())
++              return;
++
++      if (!sparse_keymap_report_event(hotk_input_dev, key, updown, false))
++              pr_err("Unknown hotkey event: 0x%04llx\n", result);
+ }
+ 
+ static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event)
+@@ -997,6 +1044,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+               pcc->platform = NULL;
+       }
+ 
++      i8042_install_filter(panasonic_i8042_filter);
+       return 0;
+ 
+ out_platform:
+@@ -1020,6 +1068,8 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device)
+       if (!device || !pcc)
+               return -EINVAL;
+ 
++      i8042_remove_filter(panasonic_i8042_filter);
++
+       if (pcc->platform) {
+               device_remove_file(&pcc->platform->dev, &dev_attr_cdpower);
+               platform_device_unregister(pcc->platform);
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index e6cb4a14cdd47..aa6ffeaa39329 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4529,6 +4529,7 @@ static void thinkpad_acpi_amd_s2idle_restore(void)
+       iounmap(addr);
+ cleanup_resource:
+       release_resource(res);
++      kfree(res);
+ }
+ 
+ static struct acpi_s2idle_dev_ops thinkpad_acpi_s2idle_dev_ops = {
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index e0de44000d92d..c290386aa2f37 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -1757,6 +1757,8 @@ static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_c
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ 
+       ndev->event_cbs[idx] = *cb;
++      if (is_ctrl_vq_idx(mvdev, idx))
++              mvdev->cvq.event_cb = *cb;
+ }
+ 
+ static void mlx5_cvq_notify(struct vringh *vring)
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index a0467bca39fa7..263d1cd3af76f 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4358,6 +4358,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
+               ihold(inode);
+               dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
+               spin_unlock(&mdsc->cap_dirty_lock);
++              ceph_wait_on_async_create(inode);
+               ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
+               iput(inode);
+               spin_lock(&mdsc->cap_dirty_lock);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 98e4a1aa898e2..409cad848ae69 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3423,7 +3423,9 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
+       struct cifs_tcon *tcon = mnt_ctx->tcon;
+       struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+       char *full_path;
++#ifdef CONFIG_CIFS_DFS_UPCALL
+       bool nodfs = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS;
++#endif
+ 
+       if (!server->ops->is_path_accessible)
+               return -EOPNOTSUPP;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 7c190e8853404..7e8c715052c09 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5254,7 +5254,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+-      if (unlikely(sqe->addr2 || sqe->file_index))
++      if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
+               return -EINVAL;
+ 
+       sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+@@ -5467,7 +5467,7 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+-      if (unlikely(sqe->addr2 || sqe->file_index))
++      if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
+               return -EINVAL;
+ 
+       sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 16c803a9d996f..31138e9be1dc2 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -7705,7 +7705,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+       {
+               struct file_zero_data_information *zero_data;
+               struct ksmbd_file *fp;
+-              loff_t off, len;
++              loff_t off, len, bfz;
+ 
+              if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+                       ksmbd_debug(SMB,
+@@ -7722,19 +7722,26 @@ int smb2_ioctl(struct ksmbd_work *work)
+               zero_data =
+                       (struct file_zero_data_information *)&req->Buffer[0];
+ 
+-              fp = ksmbd_lookup_fd_fast(work, id);
+-              if (!fp) {
+-                      ret = -ENOENT;
++              off = le64_to_cpu(zero_data->FileOffset);
++              bfz = le64_to_cpu(zero_data->BeyondFinalZero);
++              if (off > bfz) {
++                      ret = -EINVAL;
+                       goto out;
+               }
+ 
+-              off = le64_to_cpu(zero_data->FileOffset);
+-              len = le64_to_cpu(zero_data->BeyondFinalZero) - off;
++              len = bfz - off;
++              if (len) {
++                      fp = ksmbd_lookup_fd_fast(work, id);
++                      if (!fp) {
++                              ret = -ENOENT;
++                              goto out;
++                      }
+ 
+-              ret = ksmbd_vfs_zero_data(work, fp, off, len);
+-              ksmbd_fd_put(work, fp);
+-              if (ret < 0)
+-                      goto out;
++                      ret = ksmbd_vfs_zero_data(work, fp, off, len);
++                      ksmbd_fd_put(work, fp);
++                      if (ret < 0)
++                              goto out;
++              }
+               break;
+       }
+       case FSCTL_QUERY_ALLOCATED_RANGES:
+@@ -7808,14 +7815,24 @@ int smb2_ioctl(struct ksmbd_work *work)
+               src_off = le64_to_cpu(dup_ext->SourceFileOffset);
+               dst_off = le64_to_cpu(dup_ext->TargetFileOffset);
+               length = le64_to_cpu(dup_ext->ByteCount);
+-              cloned = vfs_clone_file_range(fp_in->filp, src_off, fp_out->filp,
+-                                            dst_off, length, 0);
++              /*
++               * XXX: It is not clear if FSCTL_DUPLICATE_EXTENTS_TO_FILE
++               * should fall back to vfs_copy_file_range().  This could be
++               * beneficial when re-exporting nfs/smb mount, but note that
++               * this can result in partial copy that returns an error status.
++               * If/when FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX is implemented,
++               * fall back to vfs_copy_file_range(), should be avoided when
++               * the flag DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC is set.
++               */
++              cloned = vfs_clone_file_range(fp_in->filp, src_off,
++                                            fp_out->filp, dst_off, length, 0);
+               if (cloned == -EXDEV || cloned == -EOPNOTSUPP) {
+                       ret = -EOPNOTSUPP;
+                       goto dup_ext_out;
+               } else if (cloned != length) {
+                       cloned = vfs_copy_file_range(fp_in->filp, src_off,
+-                                                   fp_out->filp, dst_off, length, 0);
++                                                   fp_out->filp, dst_off,
++                                                   length, 0);
+                       if (cloned != length) {
+                               if (cloned < 0)
+                                       ret = cloned;
+diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
+index dcdd07c6efffd..05efcdf7a4a73 100644
+--- a/fs/ksmbd/vfs.c
++++ b/fs/ksmbd/vfs.c
+@@ -1015,7 +1015,9 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
+                                    FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                                    off, len);
+ 
+-      return vfs_fallocate(fp->filp, FALLOC_FL_ZERO_RANGE, off, len);
++      return vfs_fallocate(fp->filp,
++                           FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
++                           off, len);
+ }
+ 
+ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+@@ -1046,7 +1048,7 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+       *out_count = 0;
+       end = start + length;
+       while (start < end && *out_count < in_count) {
+-              extent_start = f->f_op->llseek(f, start, SEEK_DATA);
++              extent_start = vfs_llseek(f, start, SEEK_DATA);
+               if (extent_start < 0) {
+                       if (extent_start != -ENXIO)
+                               ret = (int)extent_start;
+@@ -1056,7 +1058,7 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
+               if (extent_start >= end)
+                       break;
+ 
+-              extent_end = f->f_op->llseek(f, extent_start, SEEK_HOLE);
++              extent_end = vfs_llseek(f, extent_start, SEEK_HOLE);
+               if (extent_end < 0) {
+                       if (extent_end != -ENXIO)
+                               ret = (int)extent_end;
+@@ -1777,6 +1779,10 @@ int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
+ 
+               ret = vfs_copy_file_range(src_fp->filp, src_off,
+                                         dst_fp->filp, dst_off, len, 0);
++              if (ret == -EOPNOTSUPP || ret == -EXDEV)
++                      ret = generic_copy_file_range(src_fp->filp, src_off,
++                                                    dst_fp->filp, dst_off,
++                                                    len, 0);
+               if (ret < 0)
+                       return ret;
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d1eaaeb7f7135..f0069395f02c7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4012,22 +4012,29 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
+       }
+ 
+       page = alloc_page(GFP_KERNEL);
++      if (!page)
++              return -ENOMEM;
+       locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+-      if (page == NULL || locations == NULL)
+-              goto out;
++      if (!locations)
++              goto out_free;
++      locations->fattr = nfs_alloc_fattr();
++      if (!locations->fattr)
++              goto out_free_2;
+ 
+       status = nfs4_proc_get_locations(server, fhandle, locations, page,
+                                        cred);
+       if (status)
+-              goto out;
++              goto out_free_3;
+ 
+       for (i = 0; i < locations->nlocations; i++)
+               test_fs_location_for_trunking(&locations->locations[i], clp,
+                                             server);
+-out:
+-      if (page)
+-              __free_page(page);
++out_free_3:
++      kfree(locations->fattr);
++out_free_2:
+       kfree(locations);
++out_free:
++      __free_page(page);
+       return status;
+ }
+ 
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 9656d40bb4887..673af2d4b5a23 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -2743,5 +2743,6 @@ again:
+               goto again;
+ 
+       nfs_put_client(clp);
++      module_put_and_kthread_exit(0);
+       return 0;
+ }
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index c22ad0532e8ee..67c851f02b249 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -577,6 +577,7 @@ out_err:
+ ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
+                            u64 dst_pos, u64 count)
+ {
++      ssize_t ret;
+ 
+       /*
+        * Limit copy to 4MB to prevent indefinitely blocking an nfsd
+@@ -587,7 +588,12 @@ ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
+        * limit like this and pipeline multiple COPY requests.
+        */
+       count = min_t(u64, count, 1 << 22);
+-      return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
++      ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
++
++      if (ret == -EOPNOTSUPP || ret == -EXDEV)
++              ret = generic_copy_file_range(src, src_pos, dst, dst_pos,
++                                            count, 0);
++      return ret;
+ }
+ 
+ __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
+@@ -1170,6 +1176,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+                       nfsd_copy_write_verifier(verf, nn);
+                       err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
+                                                   since);
++                      err = nfserrno(err2);
+                       break;
+               case -EINVAL:
+                       err = nfserr_notsupp;
+@@ -1177,8 +1184,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+               default:
+                       nfsd_reset_write_verifier(nn);
+                       trace_nfsd_writeverf_reset(nn, rqstp, err2);
++                      err = nfserrno(err2);
+               }
+-              err = nfserrno(err2);
+       } else
+               nfsd_copy_write_verifier(verf, nn);
+ 
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 16d8fc84713a4..a12391d06fe5c 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -1483,8 +1483,15 @@ static int fanotify_test_fid(struct dentry *dentry)
+       return 0;
+ }
+ 
+-static int fanotify_events_supported(struct path *path, __u64 mask)
++static int fanotify_events_supported(struct fsnotify_group *group,
++                                   struct path *path, __u64 mask,
++                                   unsigned int flags)
+ {
++      unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
++      /* Strict validation of events in non-dir inode mask with v5.17+ APIs */
++      bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) ||
++                               (mask & FAN_RENAME);
++
+       /*
+        * Some filesystems such as 'proc' acquire unusual locks when opening
+        * files. For them fanotify permission events have high chances of
+@@ -1496,6 +1503,16 @@ static int fanotify_events_supported(struct path *path, __u64 mask)
+       if (mask & FANOTIFY_PERM_EVENTS &&
+           path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM)
+               return -EINVAL;
++
++      /*
++       * We shouldn't have allowed setting dirent events and the directory
++       * flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode,
++       * but because we always allowed it, error only when using new APIs.
++       */
++      if (strict_dir_events && mark_type == FAN_MARK_INODE &&
++          !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS))
++              return -ENOTDIR;
++
+       return 0;
+ }
+ 
+@@ -1634,7 +1651,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
+               goto fput_and_out;
+ 
+       if (flags & FAN_MARK_ADD) {
+-              ret = fanotify_events_supported(&path, mask);
++              ret = fanotify_events_supported(group, &path, mask, flags);
+               if (ret)
+                       goto path_put_and_out;
+       }
+@@ -1657,19 +1674,6 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
+       else
+               mnt = path.mnt;
+ 
+-      /*
+-       * FAN_RENAME is not allowed on non-dir (for now).
+-       * We shouldn't have allowed setting any dirent events in mask of
+-       * non-dir, but because we always allowed it, error only if group
+-       * was initialized with the new flag FAN_REPORT_TARGET_FID.
+-       */
+-      ret = -ENOTDIR;
+-      if (inode && !S_ISDIR(inode->i_mode) &&
+-          ((mask & FAN_RENAME) ||
+-           ((mask & FANOTIFY_DIRENT_EVENTS) &&
+-            FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID))))
+-              goto path_put_and_out;
+-
+       /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
+       if (mnt || !S_ISDIR(inode->i_mode)) {
+               mask &= ~FAN_EVENT_ON_CHILD;
+diff --git a/fs/read_write.c b/fs/read_write.c
+index e643aec2b0efe..671f47d5984ce 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1381,28 +1381,6 @@ ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
+ }
+ EXPORT_SYMBOL(generic_copy_file_range);
+ 
+-static ssize_t do_copy_file_range(struct file *file_in, loff_t pos_in,
+-                                struct file *file_out, loff_t pos_out,
+-                                size_t len, unsigned int flags)
+-{
+-      /*
+-       * Although we now allow filesystems to handle cross sb copy, passing
+-       * a file of the wrong filesystem type to filesystem driver can result
+-       * in an attempt to dereference the wrong type of ->private_data, so
+-       * avoid doing that until we really have a good reason.  NFS defines
+-       * several different file_system_type structures, but they all end up
+-       * using the same ->copy_file_range() function pointer.
+-       */
+-      if (file_out->f_op->copy_file_range &&
+-          file_out->f_op->copy_file_range == file_in->f_op->copy_file_range)
+-              return file_out->f_op->copy_file_range(file_in, pos_in,
+-                                                     file_out, pos_out,
+-                                                     len, flags);
+-
+-      return generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
+-                                     flags);
+-}
+-
+ /*
+  * Performs necessary checks before doing a file copy
+  *
+@@ -1424,6 +1402,24 @@ static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
+       if (ret)
+               return ret;
+ 
++      /*
++       * We allow some filesystems to handle cross sb copy, but passing
++       * a file of the wrong filesystem type to filesystem driver can result
++       * in an attempt to dereference the wrong type of ->private_data, so
++       * avoid doing that until we really have a good reason.
++       *
++       * nfs and cifs define several different file_system_type structures
++       * and several different sets of file_operations, but they all end up
++       * using the same ->copy_file_range() function pointer.
++       */
++      if (file_out->f_op->copy_file_range) {
++              if (file_in->f_op->copy_file_range !=
++                  file_out->f_op->copy_file_range)
++                      return -EXDEV;
++      } else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) {
++              return -EXDEV;
++      }
++
+       /* Don't touch certain kinds of inodes */
+       if (IS_IMMUTABLE(inode_out))
+               return -EPERM;
+@@ -1489,26 +1485,41 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
+       file_start_write(file_out);
+ 
+       /*
+-       * Try cloning first, this is supported by more file systems, and
+-       * more efficient if both clone and copy are supported (e.g. NFS).
++       * Cloning is supported by more file systems, so we implement copy on
++       * same sb using clone, but for filesystems where both clone and copy
++       * are supported (e.g. nfs,cifs), we only call the copy method.
+        */
++      if (file_out->f_op->copy_file_range) {
++              ret = file_out->f_op->copy_file_range(file_in, pos_in,
++                                                    file_out, pos_out,
++                                                    len, flags);
++              goto done;
++      }
++
+       if (file_in->f_op->remap_file_range &&
+           file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
+-              loff_t cloned;
+-
+-              cloned = file_in->f_op->remap_file_range(file_in, pos_in,
++              ret = file_in->f_op->remap_file_range(file_in, pos_in,
+                               file_out, pos_out,
+                               min_t(loff_t, MAX_RW_COUNT, len),
+                               REMAP_FILE_CAN_SHORTEN);
+-              if (cloned > 0) {
+-                      ret = cloned;
++              if (ret > 0)
+                       goto done;
+-              }
+       }
+ 
+-      ret = do_copy_file_range(file_in, pos_in, file_out, pos_out, len,
+-                              flags);
+-      WARN_ON_ONCE(ret == -EOPNOTSUPP);
++      /*
++       * We can get here for same sb copy of filesystems that do not implement
++       * ->copy_file_range() in case filesystem does not support clone or in
++       * case filesystem supports clone but rejected the clone request (e.g.
++       * because it was not block aligned).
++       *
++       * In both cases, fall back to kernel copy so we are able to maintain a
++       * consistent story about which filesystems support copy_file_range()
++       * and which filesystems do not, that will allow userspace tools to
++       * make consistent desicions w.r.t using copy_file_range().
++       */
++      ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
++                                    flags);
++
+ done:
+       if (ret > 0) {
+               fsnotify_access(file_in);
+diff --git a/include/linux/dim.h b/include/linux/dim.h
+index b698266d00356..6c5733981563e 100644
+--- a/include/linux/dim.h
++++ b/include/linux/dim.h
+@@ -21,7 +21,7 @@
+  * We consider 10% difference as significant.
+  */
+ #define IS_SIGNIFICANT_DIFF(val, ref) \
+-      (((100UL * abs((val) - (ref))) / (ref)) > 10)
++      ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
+ 
+ /*
+  * Calculate the gap between two values.
+diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
+index 419cadcd7ff55..2f0e418888543 100644
+--- a/include/linux/fanotify.h
++++ b/include/linux/fanotify.h
+@@ -110,6 +110,10 @@
+                                        FANOTIFY_PERM_EVENTS | \
+                                        FAN_Q_OVERFLOW | FAN_ONDIR)
+ 
++/* Events and flags relevant only for directories */
++#define FANOTIFY_DIRONLY_EVENT_BITS   (FANOTIFY_DIRENT_EVENTS | \
++                                       FAN_EVENT_ON_CHILD | FAN_ONDIR)
++
+ #define ALL_FANOTIFY_EVENT_BITS               (FANOTIFY_OUTGOING_EVENTS | \
+                                        FANOTIFY_EVENT_FLAGS)
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index f736c020cde27..bfa27972f8605 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1653,7 +1653,7 @@ enum netdev_priv_flags {
+       IFF_FAILOVER_SLAVE              = 1<<28,
+       IFF_L3MDEV_RX_HANDLER           = 1<<29,
+       IFF_LIVE_RENAME_OK              = 1<<30,
+-      IFF_TX_SKB_NO_LINEAR            = 1<<31,
++      IFF_TX_SKB_NO_LINEAR            = BIT_ULL(31),
+       IFF_CHANGE_PROTO_DOWN           = BIT_ULL(32),
+ };
+ 
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 36ca2b5c22533..b2b76dc2e5e26 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -571,6 +571,10 @@ struct macsec_ops;
+  * @mdix: Current crossover
+  * @mdix_ctrl: User setting of crossover
+  * @interrupts: Flag interrupts have been enabled
++ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
++ *                 handling shall be postponed until PHY has resumed
++ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
++ *             requiring a rerun of the interrupt handler after resume
+  * @interface: enum phy_interface_t value
+  * @skb: Netlink message for cable diagnostics
+  * @nest: Netlink nest used for cable diagnostics
+@@ -625,6 +629,8 @@ struct phy_device {
+ 
+       /* Interrupts are enabled */
+       unsigned interrupts:1;
++      unsigned irq_suspended:1;
++      unsigned irq_rerun:1;
+ 
+       enum phy_state state;
+ 
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index fc0c1454d2757..7b9e3f9a0f00b 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -1375,11 +1375,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ #define AMD_FMT_MOD_PIPE_MASK 0x7
+ 
+ #define AMD_FMT_MOD_SET(field, value) \
+-      ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
++      ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
+ #define AMD_FMT_MOD_GET(field, value) \
+       (((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
+ #define AMD_FMT_MOD_CLEAR(field) \
+-      (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
++      (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+ 
+ #if defined(__cplusplus)
+ }
+diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
+index 9690efedb5fa6..3e085b6c05a6d 100644
+--- a/include/uapi/linux/mptcp.h
++++ b/include/uapi/linux/mptcp.h
+@@ -2,16 +2,17 @@
+ #ifndef _UAPI_MPTCP_H
+ #define _UAPI_MPTCP_H
+ 
++#ifndef __KERNEL__
++#include <netinet/in.h>               /* for sockaddr_in and sockaddr_in6     */
++#include <sys/socket.h>               /* for struct sockaddr                  */
++#endif
++
+ #include <linux/const.h>
+ #include <linux/types.h>
+ #include <linux/in.h>         /* for sockaddr_in                      */
+ #include <linux/in6.h>                /* for sockaddr_in6                     */
+ #include <linux/socket.h>     /* for sockaddr_storage and sa_family   */
+ 
+-#ifndef __KERNEL__
+-#include <sys/socket.h>               /* for struct sockaddr                  */
+-#endif
+-
+ #define MPTCP_SUBFLOW_FLAG_MCAP_REM           _BITUL(0)
+ #define MPTCP_SUBFLOW_FLAG_MCAP_LOC           _BITUL(1)
+ #define MPTCP_SUBFLOW_FLAG_JOIN_REM           _BITUL(2)
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index ae4fd4de9ebe7..29eb0484215af 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ 
+               sbitmap_deferred_clear(map);
+               if (map->word == (1UL << (map_depth - 1)) - 1)
+-                      continue;
++                      goto next;
+ 
+               nr = find_first_zero_bit(&map->word, map_depth);
+               if (nr + nr_tags <= map_depth) {
+@@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+                       get_mask = ((1UL << map_tags) - 1) << nr;
+                       do {
+                               val = READ_ONCE(map->word);
++                              if ((val & ~get_mask) != val)
++                                      goto next;
+                              ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
+                       } while (ret != val);
+                       get_mask = (get_mask & ~ret) >> nr;
+@@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+                               return get_mask;
+                       }
+               }
++next:
+               /* Jump to next index. */
+               if (++index >= sb->map_nr)
+                       index = 0;
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 6b2dc7b2b6127..cc1caab4a6549 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -410,7 +410,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+       u32 mtu = dst_mtu(encap_dst) - headroom;
+ 
+       if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) ||
+-          (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu))
++          (!skb_is_gso(skb) && (skb->len - skb_network_offset(skb)) <= mtu))
+               return 0;
+ 
+       skb_dst_update_pmtu_no_confirm(skb, mtu);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 30a74e4eeab49..cd78b4fc334f7 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1965,7 +1965,10 @@ process:
+               struct sock *nsk;
+ 
+               sk = req->rsk_listener;
+-              drop_reason = tcp_inbound_md5_hash(sk, skb,
++              if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
++                      drop_reason = SKB_DROP_REASON_XFRM_POLICY;
++              else
++                      drop_reason = tcp_inbound_md5_hash(sk, skb,
+                                                  &iph->saddr, &iph->daddr,
+                                                  AF_INET, dif, sdif);
+               if (unlikely(drop_reason)) {
+@@ -2017,6 +2020,7 @@ process:
+                       }
+                       goto discard_and_relse;
+               }
++              nf_reset_ct(skb);
+               if (nsk == sk) {
+                       reqsk_put(req);
+                       tcp_v4_restore_cb(skb);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 51e77dc6571a2..3b47c901c832b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1109,10 +1109,6 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
+               goto out;
+       }
+ 
+-      if (net->ipv6.devconf_all->disable_policy ||
+-          idev->cnf.disable_policy)
+-              f6i->dst_nopolicy = true;
+-
+       neigh_parms_data_state_setall(idev->nd_parms);
+ 
+       ifa->addr = *cfg->pfx;
+@@ -5174,9 +5170,9 @@ next:
+               fillargs->event = RTM_GETMULTICAST;
+ 
+               /* multicast address */
+-              for (ifmca = rcu_dereference(idev->mc_list);
++              for (ifmca = rtnl_dereference(idev->mc_list);
+                    ifmca;
+-                   ifmca = rcu_dereference(ifmca->next), ip_idx++) {
++                   ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
+                       if (ip_idx < s_ip_idx)
+                               continue;
+                       err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index c4b6ce017d5e3..83786de847abf 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4565,8 +4565,15 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
+       }
+ 
+       f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
+-      if (!IS_ERR(f6i))
++      if (!IS_ERR(f6i)) {
+               f6i->dst_nocount = true;
++
++              if (!anycast &&
++                  (net->ipv6.devconf_all->disable_policy ||
++                   idev->cnf.disable_policy))
++                      f6i->dst_nopolicy = true;
++      }
++
+       return f6i;
+ }
+ 
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 6de01185cc68f..d43c50a7310d6 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -406,7 +406,6 @@ int __net_init seg6_hmac_net_init(struct net *net)
+ 
+       return rhashtable_init(&sdata->hmac_infos, &rht_params);
+ }
+-EXPORT_SYMBOL(seg6_hmac_net_init);
+ 
+ void seg6_hmac_exit(void)
+ {
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index c0b138c209925..6bcd5e419a083 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -323,8 +323,6 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ip_tunnel_prl __u
+               kcalloc(cmax, sizeof(*kp), GFP_KERNEL_ACCOUNT | __GFP_NOWARN) :
+               NULL;
+ 
+-      rcu_read_lock();
+-
+       ca = min(t->prl_count, cmax);
+ 
+       if (!kp) {
+@@ -341,7 +339,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ip_tunnel_prl __u
+               }
+       }
+ 
+-      c = 0;
++      rcu_read_lock();
+       for_each_prl_rcu(t->prl) {
+               if (c >= cmax)
+                       break;
+@@ -353,7 +351,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ip_tunnel_prl __u
+               if (kprl.addr != htonl(INADDR_ANY))
+                       break;
+       }
+-out:
++
+       rcu_read_unlock();
+ 
+       len = sizeof(*kp) * c;
+@@ -362,7 +360,7 @@ out:
+               ret = -EFAULT;
+ 
+       kfree(kp);
+-
++out:
+       return ret;
+ }
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 8f54293c1d887..713077eef04ac 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2305,6 +2305,11 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+               kfree_rcu(subflow, rcu);
+       } else {
+               /* otherwise tcp will dispose of the ssk and subflow ctx */
++              if (ssk->sk_state == TCP_LISTEN) {
++                      tcp_set_state(ssk, TCP_CLOSE);
++                      mptcp_subflow_queue_clean(ssk);
++                      inet_csk_listen_stop(ssk);
++              }
+               __tcp_close(ssk, 0);
+ 
+               /* close acquired an extra ref */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 9ac63fa4866ef..2aab5aff6bcdf 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -286,6 +286,7 @@ struct mptcp_sock {
+ 
+       u32 setsockopt_seq;
+       char            ca_name[TCP_CA_NAME_MAX];
++      struct mptcp_sock       *dl_next;
+ };
+ 
+ #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
+@@ -585,6 +586,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+                    struct mptcp_subflow_context *subflow);
+ void mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
++void mptcp_subflow_queue_clean(struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ 
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index be76ada89d969..7919e259175df 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1688,6 +1688,58 @@ static void subflow_state_change(struct sock *sk)
+       }
+ }
+ 
++void mptcp_subflow_queue_clean(struct sock *listener_ssk)
++{
++      struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
++      struct mptcp_sock *msk, *next, *head = NULL;
++      struct request_sock *req;
++
++      /* build a list of all unaccepted mptcp sockets */
++      spin_lock_bh(&queue->rskq_lock);
++      for (req = queue->rskq_accept_head; req; req = req->dl_next) {
++              struct mptcp_subflow_context *subflow;
++              struct sock *ssk = req->sk;
++              struct mptcp_sock *msk;
++
++              if (!sk_is_mptcp(ssk))
++                      continue;
++
++              subflow = mptcp_subflow_ctx(ssk);
++              if (!subflow || !subflow->conn)
++                      continue;
++
++              /* skip if already in list */
++              msk = mptcp_sk(subflow->conn);
++              if (msk->dl_next || msk == head)
++                      continue;
++
++              msk->dl_next = head;
++              head = msk;
++      }
++      spin_unlock_bh(&queue->rskq_lock);
++      if (!head)
++              return;
++
++      /* can't acquire the msk socket lock under the subflow one,
++       * or will cause ABBA deadlock
++       */
++      release_sock(listener_ssk);
++
++      for (msk = head; msk; msk = next) {
++              struct sock *sk = (struct sock *)msk;
++              bool slow;
++
++              slow = lock_sock_fast_nested(sk);
++              next = msk->dl_next;
++              msk->first = NULL;
++              msk->dl_next = NULL;
++              unlock_sock_fast(sk, slow);
++      }
++
++      /* we are still under the listener msk socket lock */
++      lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
++}
++
+ static int subflow_ulp_init(struct sock *sk)
+ {
+       struct inet_connection_sock *icsk = inet_csk(sk);
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index df40314de21f5..76de6c8d98655 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -143,6 +143,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
+       /* Another cpu may race to insert the element with the same key */
+       if (prev) {
+               nft_set_elem_destroy(set, he, true);
++              atomic_dec(&set->nelems);
+               he = prev;
+       }
+ 
+@@ -152,6 +153,7 @@ out:
+ 
+ err2:
+       nft_set_elem_destroy(set, he, true);
++      atomic_dec(&set->nelems);
+ err1:
+       return false;
+ }
+diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
+index b3138fc2e552e..f06ddbed3fed6 100644
+--- a/net/rose/rose_timer.c
++++ b/net/rose/rose_timer.c
+@@ -31,89 +31,89 @@ static void rose_idletimer_expiry(struct timer_list *);
+ 
+ void rose_start_heartbeat(struct sock *sk)
+ {
+-      del_timer(&sk->sk_timer);
++      sk_stop_timer(sk, &sk->sk_timer);
+ 
+       sk->sk_timer.function = rose_heartbeat_expiry;
+       sk->sk_timer.expires  = jiffies + 5 * HZ;
+ 
+-      add_timer(&sk->sk_timer);
++      sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires);
+ }
+ 
+ void rose_start_t1timer(struct sock *sk)
+ {
+       struct rose_sock *rose = rose_sk(sk);
+ 
+-      del_timer(&rose->timer);
++      sk_stop_timer(sk, &rose->timer);
+ 
+       rose->timer.function = rose_timer_expiry;
+       rose->timer.expires  = jiffies + rose->t1;
+ 
+-      add_timer(&rose->timer);
++      sk_reset_timer(sk, &rose->timer, rose->timer.expires);
+ }
+ 
+ void rose_start_t2timer(struct sock *sk)
+ {
+       struct rose_sock *rose = rose_sk(sk);
+ 
+-      del_timer(&rose->timer);
++      sk_stop_timer(sk, &rose->timer);
+ 
+       rose->timer.function = rose_timer_expiry;
+       rose->timer.expires  = jiffies + rose->t2;
+ 
+-      add_timer(&rose->timer);
++      sk_reset_timer(sk, &rose->timer, rose->timer.expires);
+ }
+ 
+ void rose_start_t3timer(struct sock *sk)
+ {
+       struct rose_sock *rose = rose_sk(sk);
+ 
+-      del_timer(&rose->timer);
++      sk_stop_timer(sk, &rose->timer);
+ 
+       rose->timer.function = rose_timer_expiry;
+       rose->timer.expires  = jiffies + rose->t3;
+ 
+-      add_timer(&rose->timer);
++      sk_reset_timer(sk, &rose->timer, rose->timer.expires);
+ }
+ 
+ void rose_start_hbtimer(struct sock *sk)
+ {
+       struct rose_sock *rose = rose_sk(sk);
+ 
+-      del_timer(&rose->timer);
++      sk_stop_timer(sk, &rose->timer);
+ 
+       rose->timer.function = rose_timer_expiry;
+       rose->timer.expires  = jiffies + rose->hb;
+ 
+-      add_timer(&rose->timer);
++      sk_reset_timer(sk, &rose->timer, rose->timer.expires);
+ }
+ 
+ void rose_start_idletimer(struct sock *sk)
+ {
+       struct rose_sock *rose = rose_sk(sk);
+ 
+-      del_timer(&rose->idletimer);
++      sk_stop_timer(sk, &rose->idletimer);
+ 
+       if (rose->idle > 0) {
+               rose->idletimer.function = rose_idletimer_expiry;
+               rose->idletimer.expires  = jiffies + rose->idle;
+ 
+-              add_timer(&rose->idletimer);
++              sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);
+       }
+ }
+ 
+ void rose_stop_heartbeat(struct sock *sk)
+ {
+-      del_timer(&sk->sk_timer);
++      sk_stop_timer(sk, &sk->sk_timer);
+ }
+ 
+ void rose_stop_timer(struct sock *sk)
+ {
+-      del_timer(&rose_sk(sk)->timer);
++      sk_stop_timer(sk, &rose_sk(sk)->timer);
+ }
+ 
+ void rose_stop_idletimer(struct sock *sk)
+ {
+-      del_timer(&rose_sk(sk)->idletimer);
++      sk_stop_timer(sk, &rose_sk(sk)->idletimer);
+ }
+ 
+ static void rose_heartbeat_expiry(struct timer_list *t)
+@@ -130,6 +130,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
+                   (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+                       bh_unlock_sock(sk);
+                       rose_destroy_socket(sk);
++                      sock_put(sk);
+                       return;
+               }
+               break;
+@@ -152,6 +153,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
+ 
+       rose_start_heartbeat(sk);
+       bh_unlock_sock(sk);
++      sock_put(sk);
+ }
+ 
+ static void rose_timer_expiry(struct timer_list *t)
+@@ -181,6 +183,7 @@ static void rose_timer_expiry(struct timer_list *t)
+               break;
+       }
+       bh_unlock_sock(sk);
++      sock_put(sk);
+ }
+ 
+ static void rose_idletimer_expiry(struct timer_list *t)
+@@ -205,4 +208,5 @@ static void rose_idletimer_expiry(struct timer_list *t)
+               sock_set_flag(sk, SOCK_DEAD);
+       }
+       bh_unlock_sock(sk);
++      sock_put(sk);
+ }
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 4f51094da9dab..6fa9e7b1406a4 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -588,7 +588,8 @@ static int tcf_idr_release_unsafe(struct tc_action *p)
+ }
+ 
+ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
+-                        const struct tc_action_ops *ops)
++                        const struct tc_action_ops *ops,
++                        struct netlink_ext_ack *extack)
+ {
+       struct nlattr *nest;
+       int n_i = 0;
+@@ -604,20 +605,25 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
+       if (nla_put_string(skb, TCA_KIND, ops->kind))
+               goto nla_put_failure;
+ 
++      ret = 0;
+       mutex_lock(&idrinfo->lock);
+       idr_for_each_entry_ul(idr, p, tmp, id) {
+               if (IS_ERR(p))
+                       continue;
+               ret = tcf_idr_release_unsafe(p);
+-              if (ret == ACT_P_DELETED) {
++              if (ret == ACT_P_DELETED)
+                       module_put(ops->owner);
+-                      n_i++;
+-              } else if (ret < 0) {
+-                      mutex_unlock(&idrinfo->lock);
+-                      goto nla_put_failure;
+-              }
++              else if (ret < 0)
++                      break;
++              n_i++;
+       }
+       mutex_unlock(&idrinfo->lock);
++      if (ret < 0) {
++              if (n_i)
++                      NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
++              else
++                      goto nla_put_failure;
++      }
+ 
+       ret = nla_put_u32(skb, TCA_FCNT, n_i);
+       if (ret)
+@@ -638,7 +644,7 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
+       struct tcf_idrinfo *idrinfo = tn->idrinfo;
+ 
+       if (type == RTM_DELACTION) {
+-              return tcf_del_walker(idrinfo, skb, ops);
++              return tcf_del_walker(idrinfo, skb, ops, extack);
+       } else if (type == RTM_GETACTION) {
+               return tcf_dump_walker(idrinfo, skb, cb);
+       } else {
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index b57cf9df4de89..8272427d29ca8 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -979,7 +979,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
+        */
+       xdr->p = (void *)p + frag2bytes;
+       space_left = xdr->buf->buflen - xdr->buf->len;
+-      if (space_left - nbytes >= PAGE_SIZE)
++      if (space_left - frag1bytes >= PAGE_SIZE)
+               xdr->end = (void *)p + PAGE_SIZE;
+       else
+               xdr->end = (void *)p + space_left - frag1bytes;
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 6ef95ce565bd3..b48d97cbbe29c 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -472,8 +472,8 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+                                  bool preliminary)
+ {
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
++      struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
+       struct tipc_node *n, *temp_node;
+-      struct tipc_link *l;
+       unsigned long intv;
+       int bearer_id;
+       int i;
+@@ -488,6 +488,16 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+                       goto exit;
+               /* A preliminary node becomes "real" now, refresh its data */
+               tipc_node_write_lock(n);
++              if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
++                                       tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
++                                       n->capabilities, &n->bc_entry.inputq1,
++                                       &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
++                      pr_warn("Broadcast rcv link refresh failed, no memory\n");
++                      tipc_node_write_unlock_fast(n);
++                      tipc_node_put(n);
++                      n = NULL;
++                      goto exit;
++              }
+               n->preliminary = false;
+               n->addr = addr;
+               hlist_del_rcu(&n->hash);
+@@ -567,7 +577,16 @@ update:
+       n->signature = INVALID_NODE_SIG;
+       n->active_links[0] = INVALID_BEARER_ID;
+       n->active_links[1] = INVALID_BEARER_ID;
+-      n->bc_entry.link = NULL;
++      if (!preliminary &&
++          !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
++                               tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
++                               n->capabilities, &n->bc_entry.inputq1,
++                               &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
++              pr_warn("Broadcast rcv link creation failed, no memory\n");
++              kfree(n);
++              n = NULL;
++              goto exit;
++      }
+       tipc_node_get(n);
+       timer_setup(&n->timer, tipc_node_timeout, 0);
+       /* Start a slow timer anyway, crypto needs it */
+@@ -1155,7 +1174,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+                         bool *respond, bool *dupl_addr)
+ {
+       struct tipc_node *n;
+-      struct tipc_link *l, *snd_l;
++      struct tipc_link *l;
+       struct tipc_link_entry *le;
+       bool addr_match = false;
+       bool sign_match = false;
+@@ -1175,22 +1194,6 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+               return;
+ 
+       tipc_node_write_lock(n);
+-      if (unlikely(!n->bc_entry.link)) {
+-              snd_l = tipc_bc_sndlink(net);
+-              if (!tipc_link_bc_create(net, tipc_own_addr(net),
+-                                       addr, peer_id, U16_MAX,
+-                                       tipc_link_min_win(snd_l),
+-                                       tipc_link_max_win(snd_l),
+-                                       n->capabilities,
+-                                       &n->bc_entry.inputq1,
+-                                       &n->bc_entry.namedq, snd_l,
+-                                       &n->bc_entry.link)) {
+-                      pr_warn("Broadcast rcv link creation failed, no mem\n");
+-                      tipc_node_write_unlock_fast(n);
+-                      tipc_node_put(n);
+-                      return;
+-              }
+-      }
+ 
+       le = &n->links[b->identity];
+ 
+diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
+index 8a69c91fcca07..8ccaf8732eb22 100644
+--- a/tools/testing/selftests/net/bpf/Makefile
++++ b/tools/testing/selftests/net/bpf/Makefile
+@@ -2,7 +2,7 @@
+ 
+ CLANG ?= clang
+ CCINCLUDE += -I../../bpf
+-CCINCLUDE += -I../../../lib
++CCINCLUDE += -I../../../../lib
+ CCINCLUDE += -I../../../../../usr/include/
+ 
+ TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index ff821025d3096..49dfabded1d44 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -61,6 +61,39 @@ chk_msk_nr()
+       __chk_nr "grep -c token:" $*
+ }
+ 
++wait_msk_nr()
++{
++      local condition="grep -c token:"
++      local expected=$1
++      local timeout=20
++      local msg nr
++      local max=0
++      local i=0
++
++      shift 1
++      msg=$*
++
++      while [ $i -lt $timeout ]; do
++              nr=$(ss -inmHMN $ns | $condition)
++              [ $nr == $expected ] && break;
++              [ $nr -gt $max ] && max=$nr
++              i=$((i + 1))
++              sleep 1
++      done
++
++      printf "%-50s" "$msg"
++      if [ $i -ge $timeout ]; then
++              echo "[ fail ] timeout while expecting $expected max $max last 
$nr"
++              ret=$test_cnt
++      elif [ $nr != $expected ]; then
++              echo "[ fail ] expected $expected found $nr"
++              ret=$test_cnt
++      else
++              echo "[  ok  ]"
++      fi
++      test_cnt=$((test_cnt+1))
++}
++
+ chk_msk_fallback_nr()
+ {
+               __chk_nr "grep -c fallback" $*
+@@ -109,7 +142,7 @@ ip -n $ns link set dev lo up
+ echo "a" | \
+       timeout ${timeout_test} \
+               ip netns exec $ns \
+-                      ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
++                      ./mptcp_connect -p 10000 -l -t ${timeout_poll} -w 20 \
+                               0.0.0.0 >/dev/null &
+ wait_local_port_listen $ns 10000
+ chk_msk_nr 0 "no msk on netns creation"
+@@ -117,7 +150,7 @@ chk_msk_nr 0 "no msk on netns creation"
+ echo "b" | \
+       timeout ${timeout_test} \
+               ip netns exec $ns \
+-                      ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
++                      ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} -w 20 \
+                               127.0.0.1 >/dev/null &
+ wait_connected $ns 10000
+ chk_msk_nr 2 "after MPC handshake "
+@@ -129,13 +162,13 @@ flush_pids
+ echo "a" | \
+       timeout ${timeout_test} \
+               ip netns exec $ns \
+-                      ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
++                      ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} -w 20 \
+                               0.0.0.0 >/dev/null &
+ wait_local_port_listen $ns 10001
+ echo "b" | \
+       timeout ${timeout_test} \
+               ip netns exec $ns \
+-                      ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
++                      ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} -w 20 \
+                               127.0.0.1 >/dev/null &
+ wait_connected $ns 10001
+ chk_msk_fallback_nr 1 "check fallback"
+@@ -146,7 +179,7 @@ for I in `seq 1 $NR_CLIENTS`; do
+       echo "a" | \
+               timeout ${timeout_test} \
+                       ip netns exec $ns \
+-                              ./mptcp_connect -p $((I+10001)) -l -w 10 \
++                              ./mptcp_connect -p $((I+10001)) -l -w 20 \
+                                       -t ${timeout_poll} 0.0.0.0 >/dev/null &
+ done
+ wait_local_port_listen $ns $((NR_CLIENTS + 10001))
+@@ -155,12 +188,11 @@ for I in `seq 1 $NR_CLIENTS`; do
+       echo "b" | \
+               timeout ${timeout_test} \
+                       ip netns exec $ns \
+-                              ./mptcp_connect -p $((I+10001)) -w 10 \
++                              ./mptcp_connect -p $((I+10001)) -w 20 \
+                                       -t ${timeout_poll} 127.0.0.1 >/dev/null &
+ done
+-sleep 1.5
+ 
+-chk_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
++wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+ flush_pids
+ 
+ exit $ret
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index 8628aa61b7634..e2ea6c126c99f 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -265,7 +265,7 @@ static void sock_test_tcpulp(int sock, int proto, unsigned int line)
+ static int sock_listen_mptcp(const char * const listenaddr,
+                            const char * const port)
+ {
+-      int sock;
++      int sock = -1;
+       struct addrinfo hints = {
+               .ai_protocol = IPPROTO_TCP,
+               .ai_socktype = SOCK_STREAM,
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+index 29f75e2a11168..8672d898f8cda 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
+@@ -88,7 +88,7 @@ static void xgetaddrinfo(const char *node, const char *service,
+ static int sock_listen_mptcp(const char * const listenaddr,
+                            const char * const port)
+ {
+-      int sock;
++      int sock = -1;
+       struct addrinfo hints = {
+               .ai_protocol = IPPROTO_TCP,
+               .ai_socktype = SOCK_STREAM,
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+index ac9a4d9c17646..ae61f39556ca8 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+@@ -136,7 +136,7 @@ static void xgetaddrinfo(const char *node, const char *service,
+ static int sock_listen_mptcp(const char * const listenaddr,
+                            const char * const port)
+ {
+-      int sock;
++      int sock = -1;
+       struct addrinfo hints = {
+               .ai_protocol = IPPROTO_TCP,
+               .ai_socktype = SOCK_STREAM,
+diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
+index 80b5d352702e5..dc932fd653634 100755
+--- a/tools/testing/selftests/net/udpgso_bench.sh
++++ b/tools/testing/selftests/net/udpgso_bench.sh
+@@ -120,7 +120,7 @@ run_all() {
+       run_udp "${ipv4_args}"
+ 
+       echo "ipv6"
+-      run_tcp "${ipv4_args}"
++      run_tcp "${ipv6_args}"
+       run_udp "${ipv6_args}"
+ }
+ 