commit:     07679341f981799a21fdb7085bab0927c9d1ba64
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 22 17:48:43 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Nov 22 17:48:43 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=07679341

Linux patch 6.1.119

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    8 +
 1118_linux-6.1.119.patch | 3274 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3282 insertions(+)

diff --git a/0000_README b/0000_README
index 1fe735fc..793d364a 100644
--- a/0000_README
+++ b/0000_README
@@ -515,6 +515,14 @@ Patch:  1116_linux-6.1.117.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.117
 
+Patch:  1117_linux-6.1.118.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.118
+
+Patch:  1118_linux-6.1.119.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.119
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1118_linux-6.1.119.patch b/1118_linux-6.1.119.patch
new file mode 100644
index 00000000..8e00ebdf
--- /dev/null
+++ b/1118_linux-6.1.119.patch
@@ -0,0 +1,3274 @@
+diff --git a/Makefile b/Makefile
+index ca304cece572b0..cc153c7314053c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 118
++SUBLEVEL = 119
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 29e2900178a1f4..b97da9e069a06b 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -252,11 +252,15 @@ __create_page_tables:
+        */
+       add     r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+       ldr     r6, =(_end - 1)
++
++      /* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
++#ifndef CONFIG_XIP_KERNEL
+       adr_l   r5, kernel_sec_start            @ _pa(kernel_sec_start)
+ #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
+       str     r8, [r5, #4]                    @ Save physical start of kernel (BE)
+ #else
+       str     r8, [r5]                        @ Save physical start of kernel (LE)
++#endif
+ #endif
+       orr     r3, r8, r7                      @ Add the MMU flags
+       add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
+@@ -264,6 +268,7 @@ __create_page_tables:
+       add     r3, r3, #1 << SECTION_SHIFT
+       cmp     r0, r6
+       bls     1b
++#ifndef CONFIG_XIP_KERNEL
+       eor     r3, r3, r7                      @ Remove the MMU flags
+       adr_l   r5, kernel_sec_end              @ _pa(kernel_sec_end)
+ #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
+@@ -271,8 +276,7 @@ __create_page_tables:
+ #else
+       str     r3, [r5]                        @ Save physical end of kernel (LE)
+ #endif
+-
+-#ifdef CONFIG_XIP_KERNEL
++#else
+       /*
+        * Map the kernel image separately as it is not located in RAM.
+        */
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 463fc2a8448f0a..a39a7043f18967 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1401,18 +1401,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+               create_mapping(&map);
+       }
+ 
+-      /*
+-       * Map the kernel if it is XIP.
+-       * It is always first in the modulearea.
+-       */
+-#ifdef CONFIG_XIP_KERNEL
+-      map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+-      map.virtual = MODULES_VADDR;
+-      map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+-      map.type = MT_ROM;
+-      create_mapping(&map);
+-#endif
+-
+       /*
+        * Map the cache flushing regions.
+        */
+@@ -1602,12 +1590,27 @@ static void __init map_kernel(void)
+        * This will only persist until we turn on proper memory management later on
+        * and we remap the whole kernel with page granularity.
+        */
++#ifdef CONFIG_XIP_KERNEL
++      phys_addr_t kernel_nx_start = kernel_sec_start;
++#else
+       phys_addr_t kernel_x_start = kernel_sec_start;
+       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       phys_addr_t kernel_nx_start = kernel_x_end;
++#endif
+       phys_addr_t kernel_nx_end = kernel_sec_end;
+       struct map_desc map;
+ 
++      /*
++       * Map the kernel if it is XIP.
++       * It is always first in the modulearea.
++       */
++#ifdef CONFIG_XIP_KERNEL
++      map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
++      map.virtual = MODULES_VADDR;
++      map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
++      map.type = MT_ROM;
++      create_mapping(&map);
++#else
+       map.pfn = __phys_to_pfn(kernel_x_start);
+       map.virtual = __phys_to_virt(kernel_x_start);
+       map.length = kernel_x_end - kernel_x_start;
+@@ -1617,7 +1620,7 @@ static void __init map_kernel(void)
+       /* If the nx part is small it may end up covered by the tail of the RWX section */
+       if (kernel_x_end == kernel_nx_end)
+               return;
+-
++#endif
+       map.pfn = __phys_to_pfn(kernel_nx_start);
+       map.virtual = __phys_to_virt(kernel_nx_start);
+       map.length = kernel_nx_end - kernel_nx_start;
+@@ -1762,6 +1765,11 @@ void __init paging_init(const struct machine_desc *mdesc)
+ {
+       void *zero_page;
+ 
++#ifdef CONFIG_XIP_KERNEL
++      /* Store the kernel RW RAM region start/end in these variables */
++      kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
++      kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
++#endif
+       pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
+                kernel_sec_start, kernel_sec_end);
+ 
+diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
+index 5966ee4a61542e..ef35c52aabd66d 100644
+--- a/arch/arm64/include/asm/mman.h
++++ b/arch/arm64/include/asm/mman.h
+@@ -3,6 +3,8 @@
+ #define __ASM_MMAN_H__
+ 
+ #include <linux/compiler.h>
++#include <linux/fs.h>
++#include <linux/shmem_fs.h>
+ #include <linux/types.h>
+ #include <uapi/asm/mman.h>
+ 
+@@ -21,19 +23,21 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ }
+ #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+ 
+-static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
++static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
++                                                 unsigned long flags)
+ {
+       /*
+        * Only allow MTE on anonymous mappings as these are guaranteed to be
+        * backed by tags-capable memory. The vm_flags may be overridden by a
+        * filesystem supporting MTE (RAM-based).
+        */
+-      if (system_supports_mte() && (flags & MAP_ANONYMOUS))
++      if (system_supports_mte() &&
++          ((flags & MAP_ANONYMOUS) || shmem_file(file)))
+               return VM_MTE_ALLOWED;
+ 
+       return 0;
+ }
+-#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
++#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)
+ 
+ static inline bool arch_validate_prot(unsigned long prot,
+       unsigned long addr __always_unused)
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 3341d4a4219903..3a32b49d7ad0d9 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -18,6 +18,7 @@ config PARISC
+       select ARCH_SUPPORTS_HUGETLBFS if PA20
+       select ARCH_SUPPORTS_MEMORY_FAILURE
+       select ARCH_STACKWALK
++      select ARCH_HAS_CACHE_LINE_SIZE
+       select ARCH_HAS_DEBUG_VM_PGTABLE
+       select HAVE_RELIABLE_STACKTRACE
+       select DMA_OPS
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index e23d06b51a2044..91e753f08eaa3d 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -20,7 +20,16 @@
+ 
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+ 
+-#define ARCH_DMA_MINALIGN     L1_CACHE_BYTES
++#ifdef CONFIG_PA20
++#define ARCH_DMA_MINALIGN     128
++#else
++#define ARCH_DMA_MINALIGN     32
++#endif
++#define ARCH_KMALLOC_MINALIGN 16      /* ldcw requires 16-byte alignment */
++
++#define arch_slab_minalign()  ((unsigned)dcache_stride)
++#define cache_line_size()     dcache_stride
++#define dma_get_cache_alignment cache_line_size
+ 
+ #define __read_mostly __section(".data..read_mostly")
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 246374cb3ed36b..7f57dce5c8286c 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2453,19 +2453,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_lapic *apic = vcpu->arch.apic;
+ 
+-      if (apic->apicv_active) {
+-              /* irr_pending is always true when apicv is activated. */
+-              apic->irr_pending = true;
++      /*
++       * When APICv is enabled, KVM must always search the IRR for a pending
++       * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
++       * isn't running.  If APICv is disabled, KVM _should_ search the IRR
++       * for a pending IRQ.  But KVM currently doesn't ensure *all* hardware,
++       * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
++       * the IRR at this time could race with IRQ delivery from hardware that
++       * still sees APICv as being enabled.
++       *
++       * FIXME: Ensure other vCPUs and devices observe the change in APICv
++       *        state prior to updating KVM's metadata caches, so that KVM
++       *        can safely search the IRR and set irr_pending accordingly.
++       */
++      apic->irr_pending = true;
++
++      if (apic->apicv_active)
+               apic->isr_count = 1;
+-      } else {
+-              /*
+-               * Don't clear irr_pending, searching the IRR can race with
+-               * updates from the CPU as APICv is still active from hardware's
+-               * perspective.  The flag will be cleared as appropriate when
+-               * KVM injects the interrupt.
+-               */
++      else
+               apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+-      }
++
+       apic->highest_isr_cache = -1;
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 2283f485a81fbc..8052f8b7d8e198 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -1126,11 +1126,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+       /*
+-       * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+-       * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
+-       * full TLB flush from the guest's perspective.  This is required even
+-       * if VPID is disabled in the host as KVM may need to synchronize the
+-       * MMU in response to the guest TLB flush.
++       * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
++       * same VPID as the host, and so architecturally, linear and combined
++       * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit.  KVM
++       * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
++       * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01.  This
++       * is required if VPID is disabled in KVM, as a TLB flush (there are no
++       * VPIDs) still occurs from L1's perspective, and KVM may need to
++       * synchronize the MMU in response to the guest TLB flush.
+        *
+        * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
+        * EPT is a special snowflake, as guest-physical mappings aren't
+@@ -2196,6 +2199,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
+ 
+       vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
+ 
++      /*
++       * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
++       * same VPID as the host.  Emulate this behavior by using vpid01 for L2
++       * if VPID is disabled in vmcs12.  Note, if VPID is disabled, VM-Enter
++       * and VM-Exit are architecturally required to flush VPID=0, but *only*
++       * VPID=0.  I.e. using vpid02 would be ok (so long as KVM emulates the
++       * required flushes), but doing so would cause KVM to over-flush.  E.g.
++       * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled,
++       * and then runs L2 X again, then KVM can and should retain TLB entries
++       * for VPID12=1.
++       */
+       if (enable_vpid) {
+               if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
+                       vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+@@ -5758,6 +5772,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+               return nested_vmx_fail(vcpu,
+                       VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ 
++      /*
++       * Always flush the effective vpid02, i.e. never flush the current VPID
++       * and never explicitly flush vpid01.  INVVPID targets a VPID, not a
++       * VMCS, and so whether or not the current vmcs12 has VPID enabled is
++       * irrelevant (and there may not be a loaded vmcs12).
++       */
+       vpid02 = nested_get_vpid02(vcpu);
+       switch (type) {
+       case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 87abf4eebf8a75..460ba48eb66c8a 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -209,9 +209,11 @@ module_param(ple_window_shrink, uint, 0444);
+ static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
+ module_param(ple_window_max, uint, 0444);
+ 
+-/* Default is SYSTEM mode, 1 for host-guest mode */
++/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
+ int __read_mostly pt_mode = PT_MODE_SYSTEM;
++#ifdef CONFIG_BROKEN
+ module_param(pt_mode, int, S_IRUGO);
++#endif
+ 
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
+@@ -3098,7 +3100,7 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
+ 
+ static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+ {
+-      if (is_guest_mode(vcpu))
++      if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
+               return nested_get_vpid02(vcpu);
+       return to_vmx(vcpu)->vpid;
+ }
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 6453fbaedb081d..aa5234034c50d8 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -650,7 +650,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
+               paddr_next = data->next;
+               len = data->len;
+ 
+-              if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
++              if ((phys_addr > paddr) &&
++                  (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
+                       memunmap(data);
+                       return true;
+               }
+@@ -712,7 +713,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
+               paddr_next = data->next;
+               len = data->len;
+ 
+-              if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
++              if ((phys_addr > paddr) &&
++                  (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
+                       early_memunmap(data, sizeof(*data));
+                       return true;
+               }
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 4d78b5583dc6a7..e66cace433cbf6 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -392,13 +392,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
+ static int nullb_apply_submit_queues(struct nullb_device *dev,
+                                    unsigned int submit_queues)
+ {
+-      return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
++      int ret;
++
++      mutex_lock(&lock);
++      ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
++      mutex_unlock(&lock);
++
++      return ret;
+ }
+ 
+ static int nullb_apply_poll_queues(struct nullb_device *dev,
+                                  unsigned int poll_queues)
+ {
+-      return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
++      int ret;
++
++      mutex_lock(&lock);
++      ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
++      mutex_unlock(&lock);
++
++      return ret;
+ }
+ 
+ NULLB_DEVICE_ATTR(size, ulong, NULL);
+@@ -444,28 +456,32 @@ static ssize_t nullb_device_power_store(struct config_item *item,
+       if (ret < 0)
+               return ret;
+ 
++      ret = count;
++      mutex_lock(&lock);
+       if (!dev->power && newp) {
+               if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
+-                      return count;
++                      goto out;
++
+               ret = null_add_dev(dev);
+               if (ret) {
+                       clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+-                      return ret;
++                      goto out;
+               }
+ 
+               set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+               dev->power = newp;
++              ret = count;
+       } else if (dev->power && !newp) {
+               if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+-                      mutex_lock(&lock);
+                       dev->power = newp;
+                       null_del_dev(dev->nullb);
+-                      mutex_unlock(&lock);
+               }
+               clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+       }
+ 
+-      return count;
++out:
++      mutex_unlock(&lock);
++      return ret;
+ }
+ 
+ CONFIGFS_ATTR(nullb_device_, power);
+@@ -1764,7 +1780,7 @@ static void null_del_dev(struct nullb *nullb)
+ 
+       dev = nullb->dev;
+ 
+-      ida_simple_remove(&nullb_indexes, nullb->index);
++      ida_free(&nullb_indexes, nullb->index);
+ 
+       list_del_init(&nullb->list);
+ 
+@@ -2102,15 +2118,12 @@ static int null_add_dev(struct nullb_device *dev)
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+ 
+-      mutex_lock(&lock);
+-      rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+-      if (rv < 0) {
+-              mutex_unlock(&lock);
++      rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
++      if (rv < 0)
+               goto out_cleanup_zone;
+-      }
++
+       nullb->index = rv;
+       dev->index = rv;
+-      mutex_unlock(&lock);
+ 
+       blk_queue_logical_block_size(nullb->q, dev->blocksize);
+       blk_queue_physical_block_size(nullb->q, dev->blocksize);
+@@ -2134,9 +2147,7 @@ static int null_add_dev(struct nullb_device *dev)
+       if (rv)
+               goto out_ida_free;
+ 
+-      mutex_lock(&lock);
+       list_add_tail(&nullb->list, &nullb_list);
+-      mutex_unlock(&lock);
+ 
+       pr_info("disk %s created\n", nullb->disk_name);
+ 
+@@ -2185,7 +2196,9 @@ static int null_create_dev(void)
+       if (!dev)
+               return -ENOMEM;
+ 
++      mutex_lock(&lock);
+       ret = null_add_dev(dev);
++      mutex_unlock(&lock);
+       if (ret) {
+               null_free_dev(dev);
+               return ret;
+diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
+index 0f238648dcfe2d..e9a288e61c1566 100644
+--- a/drivers/char/xillybus/xillybus_class.c
++++ b/drivers/char/xillybus/xillybus_class.c
+@@ -227,14 +227,15 @@ int xillybus_find_inode(struct inode *inode,
+                       break;
+               }
+ 
+-      mutex_unlock(&unit_mutex);
+-
+-      if (!unit)
++      if (!unit) {
++              mutex_unlock(&unit_mutex);
+               return -ENODEV;
++      }
+ 
+       *private_data = unit->private_data;
+       *index = minor - unit->lowest_minor;
+ 
++      mutex_unlock(&unit_mutex);
+       return 0;
+ }
+ EXPORT_SYMBOL(xillybus_find_inode);
+diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
+index 3a2a0fb3d928a9..45771b1a3716a2 100644
+--- a/drivers/char/xillybus/xillyusb.c
++++ b/drivers/char/xillybus/xillyusb.c
+@@ -185,6 +185,14 @@ struct xillyusb_dev {
+       struct mutex process_in_mutex; /* synchronize wakeup_all() */
+ };
+ 
++/*
++ * kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev
++ * struct from being freed during the gap between being found by
++ * xillybus_find_inode() and having its reference count incremented.
++ */
++
++static DEFINE_MUTEX(kref_mutex);
++
+ /* FPGA to host opcodes */
+ enum {
+       OPCODE_DATA = 0,
+@@ -1234,9 +1242,16 @@ static int xillyusb_open(struct inode *inode, struct file *filp)
+       int rc;
+       int index;
+ 
++      mutex_lock(&kref_mutex);
++
+       rc = xillybus_find_inode(inode, (void **)&xdev, &index);
+-      if (rc)
++      if (rc) {
++              mutex_unlock(&kref_mutex);
+               return rc;
++      }
++
++      kref_get(&xdev->kref);
++      mutex_unlock(&kref_mutex);
+ 
+       chan = &xdev->channels[index];
+       filp->private_data = chan;
+@@ -1272,8 +1287,6 @@ static int xillyusb_open(struct inode *inode, struct file *filp)
+           ((filp->f_mode & FMODE_WRITE) && chan->open_for_write))
+               goto unmutex_fail;
+ 
+-      kref_get(&xdev->kref);
+-
+       if (filp->f_mode & FMODE_READ)
+               chan->open_for_read = 1;
+ 
+@@ -1410,6 +1423,7 @@ static int xillyusb_open(struct inode *inode, struct file *filp)
+       return rc;
+ 
+ unmutex_fail:
++      kref_put(&xdev->kref, cleanup_dev);
+       mutex_unlock(&chan->lock);
+       return rc;
+ }
+@@ -2244,7 +2258,9 @@ static void xillyusb_disconnect(struct usb_interface *interface)
+ 
+       xdev->dev = NULL;
+ 
++      mutex_lock(&kref_mutex);
+       kref_put(&xdev->kref, cleanup_dev);
++      mutex_unlock(&kref_mutex);
+ }
+ 
+ static struct usb_driver xillyusb_driver = {
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index 8d92a24fd73d98..97adf9a7ea8947 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -377,7 +377,7 @@ static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ 
+       if (!allowed && info->mem_enabled) {
+              dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+-              return -ENXIO;
++              return false;
+       }
+ 
+       /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+index def89379b51a57..d23e7391c6f29b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+@@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
+       if (def != data)
+               WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
+ 
++      switch (adev->ip_versions[NBIO_HWIP][0]) {
++      case IP_VERSION(7, 7, 0):
++              data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
++              WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
++              break;
++      }
+ }
+ 
+ static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+index 3aab1caed2ac77..e159f715c1c21f 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+@@ -2498,6 +2498,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+       uint32_t smu_pcie_arg;
+       int ret, i;
+ 
++      if (!num_of_levels)
++              return 0;
++
+       if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+               if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+                       pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
+index aabdb5c74d936d..ed8094b2a7ff42 100644
+--- a/drivers/gpu/drm/bridge/tc358768.c
++++ b/drivers/gpu/drm/bridge/tc358768.c
+@@ -126,6 +126,9 @@
+ #define TC358768_DSI_CONFW_MODE_CLR   (6 << 29)
+ #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL   (0x3 << 24)
+ 
++/* TC358768_DSICMD_TX (0x0600) register */
++#define TC358768_DSI_CMDTX_DC_START   BIT(0)
++
+ static const char * const tc358768_supplies[] = {
+       "vddc", "vddmipi", "vddio"
+ };
+@@ -230,6 +233,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+               tc358768_write(priv, reg, tmp);
+ }
+ 
++static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
++{
++      u32 val;
++
++      /* start transfer */
++      tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
++      if (priv->error)
++              return;
++
++      /* wait transfer completion */
++      priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
++                                             (val & TC358768_DSI_CMDTX_DC_START) == 0,
++                                             100, 100000);
++}
++
+ static int tc358768_sw_reset(struct tc358768_priv *priv)
+ {
+       /* Assert Reset */
+@@ -517,8 +535,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
+               }
+       }
+ 
+-      /* start transfer */
+-      tc358768_write(priv, TC358768_DSICMD_TX, 1);
++      tc358768_dsicmd_tx(priv);
+ 
+       ret = tc358768_clear_error(priv);
+       if (ret)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index b2289a523c408d..e5b2112af13814 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1080,10 +1080,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
+       if (!plane->state->fb)
+               return -EINVAL;
+ 
+-      if (state)
+-              crtc_state = drm_atomic_get_existing_crtc_state(state,
+-                                                              new_plane_state->crtc);
+-      else /* Special case for asynchronous cursor updates. */
++      crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
++
++      /* Special case for asynchronous cursor updates. */
++      if (!crtc_state)
+               crtc_state = plane->crtc->state;
+ 
+       return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index d1212acb70932d..fc9a9ef93cae88 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -534,6 +534,9 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+       for (minor = 0; minor < MAX_DVB_MINORS; minor++)
+               if (dvb_minors[minor] == NULL)
+                       break;
++#else
++      minor = nums2minor(adap->num, type, id);
++#endif
+       if (minor >= MAX_DVB_MINORS) {
+               if (new_node) {
+                       list_del (&new_node->list_head);
+@@ -547,17 +550,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+               mutex_unlock(&dvbdev_register_lock);
+               return -EINVAL;
+       }
+-#else
+-      minor = nums2minor(adap->num, type, id);
+-      if (minor >= MAX_DVB_MINORS) {
+-              dvb_media_device_free(dvbdev);
+-              list_del(&dvbdev->list_head);
+-              kfree(dvbdev);
+-              *pdvbdev = NULL;
+-              mutex_unlock(&dvbdev_register_lock);
+-              return ret;
+-      }
+-#endif
++
+       dvbdev->minor = minor;
+       dvb_minors[minor] = dvb_device_get(dvbdev);
+       up_write(&minor_rwsem);
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 121e833efe2891..d0da4573b38cd5 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2952,8 +2952,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
+       if (host->use_dma == TRANS_MODE_IDMAC) {
+               mmc->max_segs = host->ring_size;
+               mmc->max_blk_size = 65535;
+-              mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
+-              mmc->max_seg_size = mmc->max_req_size;
++              mmc->max_seg_size = 0x1000;
++              mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+               mmc->max_blk_count = mmc->max_req_size / 512;
+       } else if (host->use_dma == TRANS_MODE_EDMAC) {
+               mmc->max_segs = 64;
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index 69dcb8805e05fe..e7f3240a53d878 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -1191,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
+       .needs_new_timings = true,
+ };
+ 
+-static const struct sunxi_mmc_cfg sun50i_a100_cfg = {
++static const struct sunxi_mmc_cfg sun50i_h616_cfg = {
+       .idma_des_size_bits = 16,
+       .idma_des_shift = 2,
+-      .clk_delays = NULL,
+       .can_calibrate = true,
+       .mask_data0 = true,
+       .needs_new_timings = true,
+@@ -1217,8 +1216,9 @@ static const struct of_device_id sunxi_mmc_of_match[] = {
+       { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg },
+       { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg },
+       { .compatible = "allwinner,sun50i-a64-emmc", .data = 
&sun50i_a64_emmc_cfg },
+-      { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg },
++      { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg },
+       { .compatible = "allwinner,sun50i-a100-emmc", .data = 
&sun50i_a100_emmc_cfg },
++      { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg },
+       { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 51d6cf0a3fb4e9..26a9f99882e61c 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -919,6 +919,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+ 
+               if (bond->dev->flags & IFF_UP)
+                       bond_hw_addr_flush(bond->dev, old_active->dev);
++
++              bond_slave_ns_maddrs_add(bond, old_active);
+       }
+ 
+       if (new_active) {
+@@ -935,6 +937,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
+                       dev_mc_sync(new_active->dev, bond->dev);
+                       netif_addr_unlock_bh(bond->dev);
+               }
++
++              bond_slave_ns_maddrs_del(bond, new_active);
+       }
+ }
+ 
+@@ -2231,6 +2235,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+       bond_compute_features(bond);
+       bond_set_carrier(bond);
+ 
++      /* Needs to be called before bond_select_active_slave(), which will
++       * remove the maddrs if the slave is selected as active slave.
++       */
++      bond_slave_ns_maddrs_add(bond, new_slave);
++
+       if (bond_uses_primary(bond)) {
+               block_netpoll_tx();
+               bond_select_active_slave(bond);
+@@ -2240,7 +2249,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+       if (bond_mode_can_use_xmit_hash(bond))
+               bond_update_slave_arr(bond, NULL);
+ 
+-
+       if (!slave_dev->netdev_ops->ndo_bpf ||
+           !slave_dev->netdev_ops->ndo_xdp_xmit) {
+               if (bond->xdp_prog) {
+@@ -2436,6 +2444,12 @@ static int __bond_release_one(struct net_device *bond_dev,
+       if (oldcurrent == slave)
+               bond_change_active_slave(bond, NULL);
+ 
++      /* Must be called after bond_change_active_slave () as the slave
++       * might change from an active slave to a backup slave. Then it is
++       * necessary to clear the maddrs on the backup slave.
++       */
++      bond_slave_ns_maddrs_del(bond, slave);
++
+       if (bond_is_lb(bond)) {
+               /* Must be called only after the slave has been
+                * detached from the list and the curr_active_slave
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 06c4cd0f000249..c8536dc7d860d0 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched/signal.h>
+ 
+ #include <net/bonding.h>
++#include <net/ndisc.h>
+ 
+ static int bond_option_active_slave_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval);
+@@ -1230,6 +1231,68 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
++static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave)
++{
++      return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
++             !bond_is_active_slave(slave) &&
++             slave->dev->flags & IFF_MULTICAST;
++}
++
++static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
++{
++      struct in6_addr *targets = bond->params.ns_targets;
++      char slot_maddr[MAX_ADDR_LEN];
++      int i;
++
++      if (!slave_can_set_ns_maddr(bond, slave))
++              return;
++
++      for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
++              if (ipv6_addr_any(&targets[i]))
++                      break;
++
++              if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
++                      if (add)
++                              dev_mc_add(slave->dev, slot_maddr);
++                      else
++                              dev_mc_del(slave->dev, slot_maddr);
++              }
++      }
++}
++
++void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave)
++{
++      if (!bond->params.arp_validate)
++              return;
++      slave_set_ns_maddrs(bond, slave, true);
++}
++
++void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
++{
++      if (!bond->params.arp_validate)
++              return;
++      slave_set_ns_maddrs(bond, slave, false);
++}
++
++static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
++                             struct in6_addr *target, struct in6_addr *slot)
++{
++      char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
++
++      if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
++              return;
++
++      /* remove the previous maddr from slave */
++      if (!ipv6_addr_any(slot) &&
++          !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
++              dev_mc_del(slave->dev, slot_maddr);
++
++      /* add new maddr on slave if target is set */
++      if (!ipv6_addr_any(target) &&
++          !ndisc_mc_map(target, target_maddr, slave->dev, 0))
++              dev_mc_add(slave->dev, target_maddr);
++}
++
+ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+                                           struct in6_addr *target,
+                                           unsigned long last_rx)
+@@ -1239,8 +1302,10 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+       struct slave *slave;
+ 
+       if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
+-              bond_for_each_slave(bond, slave, iter)
++              bond_for_each_slave(bond, slave, iter) {
+                       slave->target_last_arp_rx[slot] = last_rx;
++                      slave_set_ns_maddr(bond, slave, target, &targets[slot]);
++              }
+               targets[slot] = *target;
+       }
+ }
+@@ -1292,15 +1357,30 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ {
+       return -EPERM;
+ }
++
++static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {}
++
++void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {}
++
++void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {}
+ #endif
+ 
+ static int bond_option_arp_validate_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval)
+ {
++      bool changed = !!bond->params.arp_validate != !!newval->value;
++      struct list_head *iter;
++      struct slave *slave;
++
+       netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
+                  newval->string, newval->value);
+       bond->params.arp_validate = newval->value;
+ 
++      if (changed) {
++              bond_for_each_slave(bond, slave, iter)
++                      slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate);
++      }
++
+       return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 0a5c3d27ed3b06..aeab6c28892f2f 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3508,29 +3508,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
+       return 0;
+ }
+ 
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-/**
+- * fec_poll_controller - FEC Poll controller function
+- * @dev: The FEC network adapter
+- *
+- * Polled functionality used by netconsole and others in non interrupt mode
+- *
+- */
+-static void fec_poll_controller(struct net_device *dev)
+-{
+-      int i;
+-      struct fec_enet_private *fep = netdev_priv(dev);
+-
+-      for (i = 0; i < FEC_IRQ_NUM; i++) {
+-              if (fep->irq[i] > 0) {
+-                      disable_irq(fep->irq[i]);
+-                      fec_enet_interrupt(fep->irq[i], dev);
+-                      enable_irq(fep->irq[i]);
+-              }
+-      }
+-}
+-#endif
+-
+ static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+       netdev_features_t features)
+ {
+@@ -3604,9 +3581,6 @@ static const struct net_device_ops fec_netdev_ops = {
+       .ndo_tx_timeout         = fec_timeout,
+       .ndo_set_mac_address    = fec_set_mac_address,
+       .ndo_eth_ioctl          = fec_enet_ioctl,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-      .ndo_poll_controller    = fec_poll_controller,
+-#endif
+       .ndo_set_features       = fec_set_features,
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index f01f7dfdbcf88a..b011e0d2b620e1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -862,7 +862,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
+       return 0;
+ 
+ err_rule:
+-      mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
++      mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+ err_mod_hdr:
+       kfree(attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+index 2e0335246967b1..6d56d4a9977b0b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+@@ -665,7 +665,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+       while (remaining > 0) {
+               skb_frag_t *frag = &record->frags[i];
+ 
+-              get_page(skb_frag_page(frag));
++              page_ref_inc(skb_frag_page(frag));
+               remaining -= skb_frag_size(frag);
+               info->frags[i++] = *frag;
+       }
+@@ -768,7 +768,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+       stats = sq->stats;
+ 
+       mlx5e_tx_dma_unmap(sq->pdev, dma);
+-      put_page(wi->resync_dump_frag_page);
++      page_ref_dec(wi->resync_dump_frag_page);
+       stats->tls_dump_packets++;
+       stats->tls_dump_bytes += wi->num_bytes;
+ }
+@@ -821,12 +821,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ 
+ err_out:
+       for (; i < info.nr_frags; i++)
+-              /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
++              /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get().
+                * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
+                * released only upon their completions (or in mlx5e_free_txqsq_descs,
+                * if channel closes).
+                */
+-              put_page(skb_frag_page(&info.frags[i]));
++              page_ref_dec(skb_frag_page(&info.frags[i]));
+ 
+       return MLX5E_KTLS_SYNC_FAIL;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 164e10b5f9b7f7..50fdc3cbb778e6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1880,13 +1880,22 @@ lookup_fte_locked(struct mlx5_flow_group *g,
+               fte_tmp = NULL;
+               goto out;
+       }
++
++      nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
++
+       if (!fte_tmp->node.active) {
++              up_write_ref_node(&fte_tmp->node, false);
++
++              if (take_write)
++                      up_write_ref_node(&g->node, false);
++              else
++                      up_read_ref_node(&g->node);
++
+               tree_put_node(&fte_tmp->node, false);
+-              fte_tmp = NULL;
+-              goto out;
++
++              return NULL;
+       }
+ 
+-      nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+ out:
+       if (take_write)
+               up_write_ref_node(&g->node, false);
+diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
+index dd766e175f7dbd..8f67c39f479eef 100644
+--- a/drivers/net/ethernet/vertexcom/mse102x.c
++++ b/drivers/net/ethernet/vertexcom/mse102x.c
+@@ -437,13 +437,15 @@ static void mse102x_tx_work(struct work_struct *work)
+       mse = &mses->mse102x;
+ 
+       while ((txb = skb_dequeue(&mse->txq))) {
++              unsigned int len = max_t(unsigned int, txb->len, ETH_ZLEN);
++
+               mutex_lock(&mses->lock);
+               ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
+               mutex_unlock(&mses->lock);
+               if (ret) {
+                       mse->ndev->stats.tx_dropped++;
+               } else {
+-                      mse->ndev->stats.tx_bytes += txb->len;
++                      mse->ndev->stats.tx_bytes += len;
+                       mse->ndev->stats.tx_packets++;
+               }
+ 
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index 705c5e283c27b0..456a9508fb911e 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -115,11 +115,6 @@ struct vchiq_arm_state {
+       int first_connect;
+ };
+ 
+-struct vchiq_2835_state {
+-      int inited;
+-      struct vchiq_arm_state arm_state;
+-};
+-
+ struct vchiq_pagelist_info {
+       struct pagelist *pagelist;
+       size_t pagelist_buffer_size;
+@@ -574,29 +569,21 @@ vchiq_arm_init_state(struct vchiq_state *state,
+ int
+ vchiq_platform_init_state(struct vchiq_state *state)
+ {
+-      struct vchiq_2835_state *platform_state;
++      struct vchiq_arm_state *platform_state;
+ 
+-      state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+-      if (!state->platform_state)
++      platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
++      if (!platform_state)
+               return -ENOMEM;
+ 
+-      platform_state = (struct vchiq_2835_state *)state->platform_state;
+-
+-      platform_state->inited = 1;
+-      vchiq_arm_init_state(state, &platform_state->arm_state);
++      vchiq_arm_init_state(state, platform_state);
++      state->platform_state = (struct opaque_platform_state *)platform_state;
+ 
+       return 0;
+ }
+ 
+ static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+ {
+-      struct vchiq_2835_state *platform_state;
+-
+-      platform_state   = (struct vchiq_2835_state *)state->platform_state;
+-
+-      WARN_ON_ONCE(!platform_state->inited);
+-
+-      return &platform_state->arm_state;
++      return (struct vchiq_arm_state *)state->platform_state;
+ }
+ 
+ void
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 113aac0446de53..4f0a2edc2333d8 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -232,7 +232,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+       struct page *pg;
+       unsigned int nsg;
+       int sglen;
+-      u64 pa;
++      u64 pa, offset;
+       u64 paend;
+       struct scatterlist *sg;
+       struct device *dma = mvdev->vdev.dma_dev;
+@@ -255,8 +255,10 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+       sg = mr->sg_head.sgl;
+       for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+            map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
+-              paend = map->addr + maplen(map, mr);
+-              for (pa = map->addr; pa < paend; pa += sglen) {
++              offset = mr->start > map->start ? mr->start - map->start : 0;
++              pa = map->addr + offset;
++              paend = map->addr + offset + maplen(map, mr);
++              for (; pa < paend; pa += sglen) {
+                       pg = pfn_to_page(__phys_to_pfn(pa));
+                       if (!sg) {
+                              mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
+diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
+index 281287fae89f13..1d6d89c08e6efa 100644
+--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
++++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
+@@ -591,7 +591,11 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+               goto mdev_err;
+       }
+ 
+-      mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
++      /*
++       * id_table should be a null terminated array, so allocate one 
additional
++       * entry here, see vdpa_mgmtdev_get_classes().
++       */
++      mdev_id = kcalloc(2, sizeof(struct virtio_device_id), GFP_KERNEL);
+       if (!mdev_id) {
+               err = -ENOMEM;
+               goto mdev_id_err;
+@@ -611,8 +615,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+               goto probe_err;
+       }
+ 
+-      mdev_id->device = mdev->id.device;
+-      mdev_id->vendor = mdev->id.vendor;
++      mdev_id[0].device = mdev->id.device;
++      mdev_id[0].vendor = mdev->id.vendor;
+       mgtdev->id_table = mdev_id;
+       mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
+       mgtdev->supported_features = vp_modern_get_features(mdev);
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 8f287009545c97..495631eba3a697 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -392,17 +392,20 @@ void v9fs_evict_inode(struct inode *inode)
+       struct v9fs_inode *v9inode = V9FS_I(inode);
+       __le32 version;
+ 
+-      truncate_inode_pages_final(&inode->i_data);
+-      version = cpu_to_le32(v9inode->qid.version);
+-      fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode,
++      if (!is_bad_inode(inode)) {
++              truncate_inode_pages_final(&inode->i_data);
++              version = cpu_to_le32(v9inode->qid.version);
++              fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode,
+                                     &version);
+-      clear_inode(inode);
+-      filemap_fdatawrite(&inode->i_data);
+-
+-      fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
+-      /* clunk the fid stashed in writeback_fid */
+-      p9_fid_put(v9inode->writeback_fid);
+-      v9inode->writeback_fid = NULL;
++              clear_inode(inode);
++              filemap_fdatawrite(&inode->i_data);
++              if (v9fs_inode_cookie(v9inode))
++                      fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
++              /* clunk the fid stashed in writeback_fid */
++              p9_fid_put(v9inode->writeback_fid);
++              v9inode->writeback_fid = NULL;
++      } else
++              clear_inode(inode);
+ }
+ 
+ static int v9fs_test_inode(struct inode *inode, void *data)
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 548422b24a7d78..41c750f3447375 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -152,6 +152,7 @@ struct nfsd_net {
+       u32             s2s_cp_cl_id;
+       struct idr      s2s_cp_stateids;
+       spinlock_t      s2s_cp_lock;
++      atomic_t        pending_async_copies;
+ 
+       /*
+        * Version information
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index df9dbd93663e2f..0aebb2dc577612 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -717,15 +717,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+                          &access->ac_supported);
+ }
+ 
+-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
+-{
+-      __be32 *verf = (__be32 *)verifier->data;
+-
+-      BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
+-
+-      nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
+-}
+-
+ static __be32
+ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+            union nfsd4_op_u *u)
+@@ -1252,6 +1243,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
+ {
+       if (!refcount_dec_and_test(&copy->refcount))
+               return;
++      atomic_dec(&copy->cp_nn->pending_async_copies);
+       kfree(copy->cp_src);
+       kfree(copy);
+ }
+@@ -1593,7 +1585,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
+               test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
+                       NFS_FILE_SYNC : NFS_UNSTABLE;
+       nfsd4_copy_set_sync(copy, sync);
+-      gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
+ }
+ 
+ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
+@@ -1764,10 +1755,16 @@ static __be32
+ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+               union nfsd4_op_u *u)
+ {
++      struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++      struct nfsd4_copy *async_copy = NULL;
+       struct nfsd4_copy *copy = &u->copy;
++      struct nfsd42_write_res *result;
+       __be32 status;
+-      struct nfsd4_copy *async_copy = NULL;
+ 
++      result = &copy->cp_res;
++      nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
++
++      copy->cp_clp = cstate->clp;
+       if (nfsd4_ssc_is_inter(copy)) {
+               if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) {
+                       status = nfserr_notsupp;
+@@ -1782,25 +1779,26 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+                       return status;
+       }
+ 
+-      copy->cp_clp = cstate->clp;
+       memcpy(&copy->fh, &cstate->current_fh.fh_handle,
+               sizeof(struct knfsd_fh));
+       if (nfsd4_copy_is_async(copy)) {
+-              struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+-
+-              status = nfserrno(-ENOMEM);
+               async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+               if (!async_copy)
+                       goto out_err;
++              async_copy->cp_nn = nn;
+               INIT_LIST_HEAD(&async_copy->copies);
+               refcount_set(&async_copy->refcount, 1);
++              /* Arbitrary cap on number of pending async copy operations */
++              if (atomic_inc_return(&nn->pending_async_copies) >
++                              (int)rqstp->rq_pool->sp_nrthreads)
++                      goto out_err;
++              async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+               if (!async_copy->cp_src)
+                       goto out_err;
+               if (!nfs4_init_copy_state(nn, copy))
+                       goto out_err;
+-              memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
+-                      sizeof(copy->cp_res.cb_stateid));
++              memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
++                      sizeof(result->cb_stateid));
+               dup_copy_fields(copy, async_copy);
+               async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+                               async_copy, "%s", "copy thread");
+@@ -1830,7 +1828,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+       }
+       if (async_copy)
+               cleanup_async_copy(async_copy);
+-      status = nfserrno(-ENOMEM);
++      status = nfserr_jukebox;
+       goto out;
+ }
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 893d099e009933..e195012db48ec6 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -8076,6 +8076,7 @@ static int nfs4_state_create_net(struct net *net)
+       spin_lock_init(&nn->client_lock);
+       spin_lock_init(&nn->s2s_cp_lock);
+       idr_init(&nn->s2s_cp_stateids);
++      atomic_set(&nn->pending_async_copies, 0);
+ 
+       spin_lock_init(&nn->blocked_locks_lock);
+       INIT_LIST_HEAD(&nn->blocked_locks_lru);
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 510978e602da62..9bd1ade6ba54f9 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -574,6 +574,7 @@ struct nfsd4_copy {
+       struct nfsd4_ssc_umount_item *ss_nsui;
+       struct nfs_fh           c_fh;
+       nfs4_stateid            stateid;
++      struct nfsd_net         *cp_nn;
+ };
+ 
+ static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync)
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index 19ed9015bd6606..13d943df871dd0 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -68,7 +68,6 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+               goto failed;
+       }
+       memset(bh->b_data, 0, i_blocksize(inode));
+-      bh->b_bdev = inode->i_sb->s_bdev;
+       bh->b_blocknr = blocknr;
+       set_buffer_mapped(bh);
+       set_buffer_uptodate(bh);
+@@ -133,7 +132,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
+               goto found;
+       }
+       set_buffer_mapped(bh);
+-      bh->b_bdev = inode->i_sb->s_bdev;
+       bh->b_blocknr = pblocknr; /* set block address for read */
+       bh->b_end_io = end_buffer_read_sync;
+       get_bh(bh);
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index fcd13da5d0125d..2f612288ea4517 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -83,10 +83,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
+               goto out;
+       }
+ 
+-      if (!buffer_mapped(bh)) {
+-              bh->b_bdev = inode->i_sb->s_bdev;
++      if (!buffer_mapped(bh))
+               set_buffer_mapped(bh);
+-      }
+       bh->b_blocknr = pbn;
+       bh->b_end_io = end_buffer_read_sync;
+       get_bh(bh);
+diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
+index cbf4fa60eea217..d0808953296ac8 100644
+--- a/fs/nilfs2/mdt.c
++++ b/fs/nilfs2/mdt.c
+@@ -89,7 +89,6 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
+       if (buffer_uptodate(bh))
+               goto failed_bh;
+ 
+-      bh->b_bdev = sb->s_bdev;
+       err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
+       if (likely(!err)) {
+               get_bh(bh);
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 6bc4cda804e174..b4e2192e87967f 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -39,7 +39,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
+       first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
+       bh = nilfs_page_get_nth_block(page, block - first_block);
+ 
+-      touch_buffer(bh);
+       wait_on_buffer(bh);
+       return bh;
+ }
+@@ -64,6 +63,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
+               put_page(page);
+               return NULL;
+       }
++      bh->b_bdev = inode->i_sb->s_bdev;
+       return bh;
+ }
+ 
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index aedd4f5f459e68..70b38465aee364 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -1214,8 +1214,16 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
+       int err = 0;
+ 
+       /* If we are last writer on the inode, drop the block reservation. */
+-      if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
+-                                    atomic_read(&inode->i_writecount) == 1)) {
++      if (sbi->options->prealloc &&
++          ((file->f_mode & FMODE_WRITE) &&
++           atomic_read(&inode->i_writecount) == 1)
++         /*
++          * The only file when inode->i_fop = &ntfs_file_operations and
++          * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
++          *
++          * Add additional check here.
++          */
++          && inode->i_ino != MFT_REC_MFT) {
+               ni_lock(ni);
+               down_write(&ni->file.run_lock);
+ 
+diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
+index d65d43c61857a4..b2b47bb7952962 100644
+--- a/fs/ocfs2/resize.c
++++ b/fs/ocfs2/resize.c
+@@ -566,6 +566,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
+       ocfs2_commit_trans(osb, handle);
+ 
+ out_free_group_bh:
++      if (ret < 0)
++              ocfs2_remove_from_cache(INODE_CACHE(inode), group_bh);
+       brelse(group_bh);
+ 
+ out_unlock:
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 988d1c076861b2..b39eff78ca48ac 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -2321,6 +2321,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
+                              struct ocfs2_blockcheck_stats *stats)
+ {
+       int status = -EAGAIN;
++      u32 blksz_bits;
+ 
+       if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
+                  strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
+@@ -2335,11 +2336,15 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di,
+                               goto out;
+               }
+               status = -EINVAL;
+-              if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) {
++              /* Acceptable block sizes are 512 bytes, 1K, 2K and 4K. */
++              blksz_bits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
++              if (blksz_bits < 9 || blksz_bits > 12) {
+                       mlog(ML_ERROR, "found superblock with incorrect block "
+-                           "size: found %u, should be %u\n",
+-                           1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits),
+-                             blksz);
++                           "size bits: found %u, should be 9, 10, 11, or 
12\n",
++                           blksz_bits);
++              } else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
++                      mlog(ML_ERROR, "found superblock with incorrect block "
++                           "size: found %u, should be %u\n", 1 << blksz_bits, 
blksz);
+               } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
+                          OCFS2_MAJOR_REV_LEVEL ||
+                          le16_to_cpu(di->id2.i_super.s_minor_rev_level) !=
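The rewritten check validates s_blocksize_bits as a shift amount before it is ever used in a shift, which is what removes the undefined behavior for corrupted superblocks. The same ordering in isolation (a sketch, not the ocfs2 code verbatim):

	/* Sketch: reject the exponent first, shift only when it is sane. */
	static int check_blocksize_bits(u32 blksz_bits, u32 blksz)
	{
		if (blksz_bits < 9 || blksz_bits > 12)	/* 512B .. 4K only */
			return -EINVAL;		/* shift never evaluated */
		if ((1U << blksz_bits) != blksz)
			return -EINVAL;
		return 0;
	}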
+diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
+index 03dded29a98042..727cb49926ee52 100644
+--- a/fs/smb/server/smb2misc.c
++++ b/fs/smb/server/smb2misc.c
+@@ -101,13 +101,17 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+               *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
+               break;
+       case SMB2_TREE_CONNECT:
+-              *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
++              *off = max_t(unsigned short int,
++                           le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset),
++                           offsetof(struct smb2_tree_connect_req, Buffer));
+               *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
+               break;
+       case SMB2_CREATE:
+       {
+               unsigned short int name_off =
+-                      le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
++                      max_t(unsigned short int,
++                            le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset),
++                            offsetof(struct smb2_create_req, Buffer));
+               unsigned short int name_len =
+                       le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
+ 
+@@ -128,11 +132,15 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+               break;
+       }
+       case SMB2_QUERY_INFO:
+-              *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
++              *off = max_t(unsigned int,
++                           le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset),
++                           offsetof(struct smb2_query_info_req, Buffer));
+               *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
+               break;
+       case SMB2_SET_INFO:
+-              *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
++              *off = max_t(unsigned int,
++                           le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset),
++                           offsetof(struct smb2_set_info_req, Buffer));
+               *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
+               break;
+       case SMB2_READ:
+@@ -142,7 +150,7 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+       case SMB2_WRITE:
+               if (((struct smb2_write_req *)hdr)->DataOffset ||
+                   ((struct smb2_write_req *)hdr)->Length) {
+-                      *off = max_t(unsigned int,
++                      *off = max_t(unsigned short int,
+                                    le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
+                                    offsetof(struct smb2_write_req, Buffer));
+                       *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
+@@ -153,7 +161,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+               *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
+               break;
+       case SMB2_QUERY_DIRECTORY:
+-              *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
++              *off = max_t(unsigned short int,
++                           le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset),
++                           offsetof(struct smb2_query_directory_req, Buffer));
+               *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
+               break;
+       case SMB2_LOCK:
+@@ -168,7 +178,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+               break;
+       }
+       case SMB2_IOCTL:
+-              *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
++              *off = max_t(unsigned int,
++                           le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset),
++                           offsetof(struct smb2_ioctl_req, Buffer));
+               *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
+               break;
+       default:
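Every hunk above applies one idiom: a client-supplied offset is clamped with max_t() so it can never point back into the fixed-size request header, that is, never before the Buffer member. The idiom in isolation, with a hypothetical wire structure (all names here are illustrative, not ksmbd's):

	#include <linux/types.h>
	#include <linux/minmax.h>
	#include <linux/stddef.h>

	struct demo_req {		/* hypothetical on-the-wire layout */
		__u8	hdr[64];	/* fixed-size header */
		__le16	DataOffset;	/* client-controlled */
		__u8	Buffer[];	/* variable payload starts here */
	};

	static unsigned int demo_clamp_offset(const struct demo_req *req)
	{
		/* A wire offset smaller than offsetof(..., Buffer) would alias
		 * header fields; clamp it to the first legal payload byte. */
		return max_t(unsigned int, le16_to_cpu(req->DataOffset),
			     offsetof(struct demo_req, Buffer));
	}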
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 9b5847bf9b2a40..7e068c4187a8e8 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1961,7 +1961,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ 
+       WORK_BUFFERS(work, req, rsp);
+ 
+-      treename = smb_strndup_from_utf16(req->Buffer,
++      treename = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->PathOffset),
+                                         le16_to_cpu(req->PathLength), true,
+                                         conn->local_nls);
+       if (IS_ERR(treename)) {
+@@ -2723,7 +2723,7 @@ int smb2_open(struct ksmbd_work *work)
+                       goto err_out2;
+               }
+ 
+-              name = smb2_get_name(req->Buffer,
++              name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
+                                    le16_to_cpu(req->NameLength),
+                                    work->conn->local_nls);
+               if (IS_ERR(name)) {
+@@ -4096,7 +4096,7 @@ int smb2_query_dir(struct ksmbd_work *work)
+       }
+ 
+       srch_flag = req->Flags;
+-      srch_ptr = smb_strndup_from_utf16(req->Buffer,
++      srch_ptr = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->FileNameOffset),
+                                         le16_to_cpu(req->FileNameLength), 1,
+                                         conn->local_nls);
+       if (IS_ERR(srch_ptr)) {
+@@ -4357,7 +4357,8 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
+                   sizeof(struct smb2_ea_info_req))
+                       return -EINVAL;
+ 
+-              ea_req = (struct smb2_ea_info_req *)req->Buffer;
++              ea_req = (struct smb2_ea_info_req *)((char *)req +
++                                                   le16_to_cpu(req->InputBufferOffset));
+       } else {
+               /* need to send all EAs, if no specific EA is requested*/
+               if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
+@@ -5971,6 +5972,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                             struct ksmbd_share_config *share)
+ {
+       unsigned int buf_len = le32_to_cpu(req->BufferLength);
++      char *buffer = (char *)req + le16_to_cpu(req->BufferOffset);
+ 
+       switch (req->FileInfoClass) {
+       case FILE_BASIC_INFORMATION:
+@@ -5978,7 +5980,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+               if (buf_len < sizeof(struct smb2_file_basic_info))
+                       return -EINVAL;
+ 
+-              return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
++              return set_file_basic_info(fp, (struct smb2_file_basic_info *)buffer, share);
+       }
+       case FILE_ALLOCATION_INFORMATION:
+       {
+@@ -5986,7 +5988,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       return -EINVAL;
+ 
+               return set_file_allocation_info(work, fp,
+-                                              (struct smb2_file_alloc_info *)req->Buffer);
++                                              (struct smb2_file_alloc_info *)buffer);
+       }
+       case FILE_END_OF_FILE_INFORMATION:
+       {
+@@ -5994,7 +5996,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       return -EINVAL;
+ 
+               return set_end_of_file_info(work, fp,
+-                                          (struct smb2_file_eof_info *)req->Buffer);
++                                          (struct smb2_file_eof_info *)buffer);
+       }
+       case FILE_RENAME_INFORMATION:
+       {
+@@ -6002,7 +6004,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       return -EINVAL;
+ 
+               return set_rename_info(work, fp,
+-                                     (struct smb2_file_rename_info *)req->Buffer,
++                                     (struct smb2_file_rename_info *)buffer,
+                                      buf_len);
+       }
+       case FILE_LINK_INFORMATION:
+@@ -6011,7 +6013,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       return -EINVAL;
+ 
+               return smb2_create_link(work, work->tcon->share_conf,
+-                                      (struct smb2_file_link_info *)req->Buffer,
++                                      (struct smb2_file_link_info *)buffer,
+                                       buf_len, fp->filp,
+                                       work->conn->local_nls);
+       }
+@@ -6021,7 +6023,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+                       return -EINVAL;
+ 
+               return set_file_disposition_info(fp,
+-                                               (struct smb2_file_disposition_info *)req->Buffer);
++                                               (struct smb2_file_disposition_info *)buffer);
+       }
+       case FILE_FULL_EA_INFORMATION:
+       {
+@@ -6034,7 +6036,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+               if (buf_len < sizeof(struct smb2_ea_info))
+                       return -EINVAL;
+ 
+-              return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
++              return smb2_set_ea((struct smb2_ea_info *)buffer,
+                                  buf_len, &fp->filp->f_path, true);
+       }
+       case FILE_POSITION_INFORMATION:
+@@ -6042,14 +6044,14 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
+               if (buf_len < sizeof(struct smb2_file_pos_info))
+                       return -EINVAL;
+ 
+-              return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
++              return set_file_position_info(fp, (struct smb2_file_pos_info *)buffer);
+       }
+       case FILE_MODE_INFORMATION:
+       {
+               if (buf_len < sizeof(struct smb2_file_mode_info))
+                       return -EINVAL;
+ 
+-              return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
++              return set_file_mode_info(fp, (struct smb2_file_mode_info *)buffer);
+       }
+       }
+ 
+@@ -6130,7 +6132,7 @@ int smb2_set_info(struct ksmbd_work *work)
+               }
+               rc = smb2_set_info_sec(fp,
+                                      le32_to_cpu(req->AdditionalInformation),
+-                                     req->Buffer,
++                                     (char *)req + le16_to_cpu(req->BufferOffset),
+                                      le32_to_cpu(req->BufferLength));
+               ksmbd_revert_fsids(work);
+               break;
+@@ -7576,7 +7578,7 @@ static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
+                                struct smb2_ioctl_rsp *rsp)
+ {
+       struct ksmbd_rpc_command *rpc_resp;
+-      char *data_buf = (char *)&req->Buffer[0];
++      char *data_buf = (char *)req + le32_to_cpu(req->InputOffset);
+       int nbytes = 0;
+ 
+       rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
+@@ -7689,6 +7691,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+       u64 id = KSMBD_NO_FID;
+       struct ksmbd_conn *conn = work->conn;
+       int ret = 0;
++      char *buffer;
+ 
+       if (work->next_smb2_rcv_hdr_off) {
+               req = ksmbd_req_buf_next(work);
+@@ -7711,6 +7714,8 @@ int smb2_ioctl(struct ksmbd_work *work)
+               goto out;
+       }
+ 
++      buffer = (char *)req + le32_to_cpu(req->InputOffset);
++
+       cnt_code = le32_to_cpu(req->CtlCode);
+       ret = smb2_calc_max_out_buf_len(work, 48,
+                                       le32_to_cpu(req->MaxOutputResponse));
+@@ -7768,7 +7773,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+               }
+ 
+               ret = fsctl_validate_negotiate_info(conn,
+-                      (struct validate_negotiate_info_req *)&req->Buffer[0],
++                      (struct validate_negotiate_info_req *)buffer,
+                       (struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
+                       in_buf_len);
+               if (ret < 0)
+@@ -7821,7 +7826,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+               rsp->VolatileFileId = req->VolatileFileId;
+               rsp->PersistentFileId = req->PersistentFileId;
+               fsctl_copychunk(work,
+-                              (struct copychunk_ioctl_req *)&req->Buffer[0],
++                              (struct copychunk_ioctl_req *)buffer,
+                               le32_to_cpu(req->CtlCode),
+                               le32_to_cpu(req->InputCount),
+                               req->VolatileFileId,
+@@ -7834,8 +7839,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+                       goto out;
+               }
+ 
+-              ret = fsctl_set_sparse(work, id,
+-                                     (struct file_sparse *)&req->Buffer[0]);
++              ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer);
+               if (ret < 0)
+                       goto out;
+               break;
+@@ -7858,7 +7862,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+               }
+ 
+               zero_data =
+-                      (struct file_zero_data_information *)&req->Buffer[0];
++                      (struct file_zero_data_information *)buffer;
+ 
+               off = le64_to_cpu(zero_data->FileOffset);
+               bfz = le64_to_cpu(zero_data->BeyondFinalZero);
+@@ -7889,7 +7893,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+               }
+ 
+               ret = fsctl_query_allocated_ranges(work, id,
+-                      (struct file_allocated_range_buffer *)&req->Buffer[0],
++                      (struct file_allocated_range_buffer *)buffer,
+                       (struct file_allocated_range_buffer *)&rsp->Buffer[0],
+                       out_buf_len /
+                       sizeof(struct file_allocated_range_buffer), &nbytes);
+@@ -7933,7 +7937,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+                       goto out;
+               }
+ 
+-              dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
++              dup_ext = (struct duplicate_extents_to_file *)buffer;
+ 
+               fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
+                                            dup_ext->PersistentFileHandle);
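On the consumer side, smb2pdu.c stops trusting req->Buffer as the payload address and recomputes each pointer as request base plus the offset that smb2misc.c has already clamped, so parser and consumer can no longer disagree. The derivation on its own (names illustrative):

	/* Sketch: honour the validated wire offset rather than assuming the
	 * payload begins exactly at the structure's Buffer member. */
	static inline void *demo_req_payload(void *req_base, u16 validated_off)
	{
		return (char *)req_base + validated_off;
	}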
+diff --git a/include/linux/mman.h b/include/linux/mman.h
+index 58b3abd457a38d..21ea08b919d9f2 100644
+--- a/include/linux/mman.h
++++ b/include/linux/mman.h
+@@ -2,6 +2,7 @@
+ #ifndef _LINUX_MMAN_H
+ #define _LINUX_MMAN_H
+ 
++#include <linux/fs.h>
+ #include <linux/mm.h>
+ #include <linux/percpu_counter.h>
+ 
+@@ -90,7 +91,7 @@ static inline void vm_unacct_memory(long pages)
+ #endif
+ 
+ #ifndef arch_calc_vm_flag_bits
+-#define arch_calc_vm_flag_bits(flags) 0
++#define arch_calc_vm_flag_bits(file, flags) 0
+ #endif
+ 
+ #ifndef arch_validate_prot
+@@ -147,12 +148,12 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
+  * Combine the mmap "flags" argument into "vm_flags" used internally.
+  */
+ static inline unsigned long
+-calc_vm_flag_bits(unsigned long flags)
++calc_vm_flag_bits(struct file *file, unsigned long flags)
+ {
+       return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
+              _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
+              _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
+-             arch_calc_vm_flag_bits(flags);
++             arch_calc_vm_flag_bits(file, flags);
+ }
+ 
+ unsigned long vm_commit_limit(void);
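Threading the file into calc_vm_flag_bits() lets an architecture consult the backing object when deriving vm_flags; it is the hook that replaces the unconditional VM_MTE_ALLOWED removed from shmem_mmap() later in this patch. A sketch of the kind of override this enables, modelled loosely on the arm64 MTE case (not the verbatim arm64 code; the capability check is omitted):

	/* Sketch: tag-capable mappings are anonymous or RAM-backed (shmem). */
	static inline unsigned long demo_arch_calc_vm_flag_bits(struct file *file,
								unsigned long flags)
	{
		if (flags & MAP_ANONYMOUS)
			return VM_MTE_ALLOWED;
		if (file && shmem_file(file))
			return VM_MTE_ALLOWED;
		return 0;
	}
	#define arch_calc_vm_flag_bits(file, flags) \
		demo_arch_calc_vm_flag_bits(file, flags)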
+diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
+index bae5e2369b4f7a..0eb3a2b1f81ff0 100644
+--- a/include/linux/sockptr.h
++++ b/include/linux/sockptr.h
+@@ -50,11 +50,38 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
+       return 0;
+ }
+ 
++/* Deprecated.
++ * This is unsafe, unless caller checked user provided optlen.
++ * Prefer copy_safe_from_sockptr() instead.
++ */
+ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
+ {
+       return copy_from_sockptr_offset(dst, src, 0, size);
+ }
+ 
++/**
++ * copy_safe_from_sockptr: copy a struct from sockptr
++ * @dst:   Destination address, in kernel space. This buffer must be @ksize
++ *         bytes long.
++ * @ksize: Size of @dst struct.
++ * @optval: Source address. (in user or kernel space)
++ * @optlen: Size of @optval data.
++ *
++ * Returns:
++ *  * -EINVAL: @optlen < @ksize
++ *  * -EFAULT: access to userspace failed.
++ *  * 0 : @ksize bytes were copied
++ */
++static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
++                                       sockptr_t optval, unsigned int optlen)
++{
++      if (optlen < ksize)
++              return -EINVAL;
++      if (copy_from_sockptr(dst, optval, ksize))
++              return -EFAULT;
++      return 0;
++}
++
+ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
+               const void *src, size_t size)
+ {
+diff --git a/include/net/bond_options.h b/include/net/bond_options.h
+index 69292ecc03257f..f631d9f099410c 100644
+--- a/include/net/bond_options.h
++++ b/include/net/bond_options.h
+@@ -160,5 +160,7 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond);
+ #if IS_ENABLED(CONFIG_IPV6)
+ void bond_option_ns_ip6_targets_clear(struct bonding *bond);
+ #endif
++void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave);
++void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave);
+ 
+ #endif /* _NET_BOND_OPTIONS_H */
+diff --git a/lib/buildid.c b/lib/buildid.c
+index e41fb0ee405f63..cc5da016b23517 100644
+--- a/lib/buildid.c
++++ b/lib/buildid.c
+@@ -40,7 +40,7 @@ static int parse_build_id_buf(unsigned char *build_id,
+                   name_sz == note_name_sz &&
+                   memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
+                   desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
+-                      data = note_start + note_off + ALIGN(note_name_sz, 4);
++                      data = note_start + note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);
+                       memcpy(build_id, data, desc_sz);
+                       memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
+                       if (size)
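The fix adds the previously missing note header: an ELF note entry is the Elf32_Nhdr itself, then the name padded to a 4-byte boundary, then the descriptor, so the descriptor offset must include sizeof(Elf32_Nhdr). The layout arithmetic in isolation:

	#include <elf.h>
	#include <stddef.h>

	/* Offset of the descriptor from the start of one note entry:
	 * header, 4-byte-aligned name, then descriptor data. */
	static size_t note_desc_offset(const Elf32_Nhdr *nhdr)
	{
		return sizeof(*nhdr) + ((nhdr->n_namesz + 3u) & ~3u);
	}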
+diff --git a/mm/internal.h b/mm/internal.h
+index a50bc08337d217..16a4a9aece3043 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -52,6 +52,25 @@ struct folio_batch;
+ 
+ void page_writeback_init(void);
+ 
++/*
++ * This is a file-backed mapping, and is about to be memory mapped - invoke its
++ * mmap hook and safely handle error conditions. On error, VMA hooks will be
++ * mutated.
++ *
++ * @file: File which backs the mapping.
++ * @vma:  VMA which we are mapping.
++ *
++ * Returns: 0 if success, error otherwise.
++ */
++int mmap_file(struct file *file, struct vm_area_struct *vma);
++
++/*
++ * If the VMA has a close hook then close it, and since closing it might leave
++ * it in an inconsistent state which makes the use of any hooks suspect, clear
++ * them down by installing dummy empty hooks.
++ */
++void vma_close(struct vm_area_struct *vma);
++
+ static inline void *folio_raw_mapping(struct folio *folio)
+ {
+       unsigned long mapping = (unsigned long)folio->mapping;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index c0f9575493debf..9a9933ede5423b 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -136,8 +136,7 @@ void unlink_file_vma(struct vm_area_struct *vma)
+ static void remove_vma(struct vm_area_struct *vma)
+ {
+       might_sleep();
+-      if (vma->vm_ops && vma->vm_ops->close)
+-              vma->vm_ops->close(vma);
++      vma_close(vma);
+       if (vma->vm_file)
+               fput(vma->vm_file);
+       mpol_put(vma_policy(vma));
+@@ -1317,7 +1316,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+        * to. we assume access permissions have been handled by the open
+        * of the memory object, so we don't do any here.
+        */
+-      vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+-      vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
+                       mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ 
+       if (flags & MAP_LOCKED)
+@@ -2388,8 +2387,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+       new->vm_start = new->vm_end;
+       new->vm_pgoff = 0;
+       /* Clean everything up if vma_adjust failed. */
+-      if (new->vm_ops && new->vm_ops->close)
+-              new->vm_ops->close(new);
++      vma_close(new);
+       if (new->vm_file)
+               fput(new->vm_file);
+       unlink_anon_vmas(new);
+@@ -2654,7 +2652,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+       return do_mas_munmap(&mas, mm, start, len, uf, false);
+ }
+ 
+-unsigned long mmap_region(struct file *file, unsigned long addr,
++static unsigned long __mmap_region(struct file *file, unsigned long addr,
+               unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+               struct list_head *uf)
+ {
+@@ -2752,30 +2750,32 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+       vma->vm_page_prot = vm_get_page_prot(vm_flags);
+       vma->vm_pgoff = pgoff;
+ 
+-      if (file) {
+-              if (vm_flags & VM_SHARED) {
+-                      error = mapping_map_writable(file->f_mapping);
+-                      if (error)
+-                              goto free_vma;
+-              }
++      if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
++              error = -ENOMEM;
++              goto free_vma;
++      }
+ 
++      if (file) {
+               vma->vm_file = get_file(file);
+-              error = call_mmap(file, vma);
++              error = mmap_file(file, vma);
+               if (error)
+-                      goto unmap_and_free_vma;
++                      goto unmap_and_free_file_vma;
++
++              /* Drivers cannot alter the address of the VMA. */
++              WARN_ON_ONCE(addr != vma->vm_start);
+ 
+               /*
+-               * Expansion is handled above, merging is handled below.
+-               * Drivers should not alter the address of the VMA.
++               * Drivers should not permit writability when previously it was
++               * disallowed.
+                */
+-              if (WARN_ON((addr != vma->vm_start))) {
+-                      error = -EINVAL;
+-                      goto close_and_free_vma;
+-              }
++              VM_WARN_ON_ONCE(vm_flags != vma->vm_flags &&
++                              !(vm_flags & VM_MAYWRITE) &&
++                              (vma->vm_flags & VM_MAYWRITE));
++
+               mas_reset(&mas);
+ 
+               /*
+-               * If vm_flags changed after call_mmap(), we should try merge
++               * If vm_flags changed after mmap_file(), we should try merge
+                * vma again as we may succeed this time.
+                */
+               if (unlikely(vm_flags != vma->vm_flags && prev)) {
+@@ -2794,7 +2794,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+                               vma = merge;
+                               /* Update vm_flags to pick up the change. */
+                               vm_flags = vma->vm_flags;
+-                              goto unmap_writable;
++                              mas_destroy(&mas);
++                              goto file_expanded;
+                       }
+               }
+ 
+@@ -2802,31 +2803,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+       } else if (vm_flags & VM_SHARED) {
+               error = shmem_zero_setup(vma);
+               if (error)
+-                      goto free_vma;
++                      goto free_iter_vma;
+       } else {
+               vma_set_anonymous(vma);
+       }
+ 
+-      /* Allow architectures to sanity-check the vm_flags */
+-      if (!arch_validate_flags(vma->vm_flags)) {
+-              error = -EINVAL;
+-              if (file)
+-                      goto close_and_free_vma;
+-              else if (vma->vm_file)
+-                      goto unmap_and_free_vma;
+-              else
+-                      goto free_vma;
+-      }
+-
+-      if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
+-              error = -ENOMEM;
+-              if (file)
+-                      goto close_and_free_vma;
+-              else if (vma->vm_file)
+-                      goto unmap_and_free_vma;
+-              else
+-                      goto free_vma;
+-      }
++#ifdef CONFIG_SPARC64
++      /* TODO: Fix SPARC ADI! */
++      WARN_ON_ONCE(!arch_validate_flags(vm_flags));
++#endif
+ 
+       if (vma->vm_file)
+               i_mmap_lock_write(vma->vm_file->f_mapping);
+@@ -2849,10 +2834,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+        */
+       khugepaged_enter_vma(vma, vma->vm_flags);
+ 
+-      /* Once vma denies write, undo our temporary denial count */
+-unmap_writable:
+-      if (file && vm_flags & VM_SHARED)
+-              mapping_unmap_writable(file->f_mapping);
++file_expanded:
+       file = vma->vm_file;
+ expanded:
+       perf_event_mmap(vma);
+@@ -2881,29 +2863,54 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ 
+       vma_set_page_prot(vma);
+ 
+-      validate_mm(mm);
+       return addr;
+ 
+-close_and_free_vma:
+-      if (vma->vm_ops && vma->vm_ops->close)
+-              vma->vm_ops->close(vma);
+-unmap_and_free_vma:
++unmap_and_free_file_vma:
+       fput(vma->vm_file);
+       vma->vm_file = NULL;
+ 
+       /* Undo any partial mapping done by a device driver. */
+       unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
+-      if (file && (vm_flags & VM_SHARED))
+-              mapping_unmap_writable(file->f_mapping);
++free_iter_vma:
++      mas_destroy(&mas);
+ free_vma:
+       vm_area_free(vma);
+ unacct_error:
+       if (charged)
+               vm_unacct_memory(charged);
+-      validate_mm(mm);
+       return error;
+ }
+ 
++unsigned long mmap_region(struct file *file, unsigned long addr,
++                        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
++                        struct list_head *uf)
++{
++      unsigned long ret;
++      bool writable_file_mapping = false;
++
++      /* Allow architectures to sanity-check the vm_flags. */
++      if (!arch_validate_flags(vm_flags))
++              return -EINVAL;
++
++      /* Map writable and ensure this isn't a sealed memfd. */
++      if (file && (vm_flags & VM_SHARED)) {
++              int error = mapping_map_writable(file->f_mapping);
++
++              if (error)
++                      return error;
++              writable_file_mapping = true;
++      }
++
++      ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
++
++      /* Clear our write mapping regardless of error. */
++      if (writable_file_mapping)
++              mapping_unmap_writable(file->f_mapping);
++
++      validate_mm(current->mm);
++      return ret;
++}
++
+ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
+ {
+       int ret;
+@@ -3376,8 +3383,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+       return new_vma;
+ 
+ out_vma_link:
+-      if (new_vma->vm_ops && new_vma->vm_ops->close)
+-              new_vma->vm_ops->close(new_vma);
++      vma_close(new_vma);
+ 
+       if (new_vma->vm_file)
+               fput(new_vma->vm_file);
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 8e8fe491d914a2..859ba6bdeb9ce3 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -650,8 +650,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
+  */
+ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+ {
+-      if (vma->vm_ops && vma->vm_ops->close)
+-              vma->vm_ops->close(vma);
++      vma_close(vma);
+       if (vma->vm_file)
+               fput(vma->vm_file);
+       put_nommu_region(vma->vm_region);
+@@ -904,7 +903,7 @@ static unsigned long determine_vm_flags(struct file *file,
+ {
+       unsigned long vm_flags;
+ 
+-      vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
++      vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
+       /* vm_flags |= mm->def_flags; */
+ 
+       if (!(capabilities & NOMMU_MAP_DIRECT)) {
+@@ -939,7 +938,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
+ {
+       int ret;
+ 
+-      ret = call_mmap(vma->vm_file, vma);
++      ret = mmap_file(vma->vm_file, vma);
+       if (ret == 0) {
+               vma->vm_region->vm_top = vma->vm_region->vm_end;
+               return 0;
+@@ -970,7 +969,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
+        * - VM_MAYSHARE will be set if it may attempt to share
+        */
+       if (capabilities & NOMMU_MAP_DIRECT) {
+-              ret = call_mmap(vma->vm_file, vma);
++              ret = mmap_file(vma->vm_file, vma);
+               if (ret == 0) {
+                       /* shouldn't return success if we're not sharing */
+                       BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index b87b350b2f4059..58a3f70eb39bbd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5457,7 +5457,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+       gfp = alloc_gfp;
+ 
+       /* Find an allowed local zone that meets the low watermark. */
+-      for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
++      z = ac.preferred_zoneref;
++      for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
+               unsigned long mark;
+ 
+               if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 0e1fbc53717df2..2e6b7db7f14bde 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1086,9 +1086,7 @@ static int shmem_getattr(struct user_namespace *mnt_userns,
+       stat->attributes_mask |= (STATX_ATTR_APPEND |
+                       STATX_ATTR_IMMUTABLE |
+                       STATX_ATTR_NODUMP);
+-      inode_lock_shared(inode);
+       generic_fillattr(&init_user_ns, inode, stat);
+-      inode_unlock_shared(inode);
+ 
+       if (shmem_is_huge(NULL, inode, 0, false))
+               stat->blksize = HPAGE_PMD_SIZE;
+@@ -2308,9 +2306,6 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+       if (ret)
+               return ret;
+ 
+-      /* arm64 - allow memory tagging on RAM-based files */
+-      vma->vm_flags |= VM_MTE_ALLOWED;
+-
+       file_accessed(file);
+       vma->vm_ops = &shmem_vm_ops;
+       return 0;
+diff --git a/mm/util.c b/mm/util.c
+index 94fff247831b4b..d3a2877c176f98 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -1103,6 +1103,39 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
+       return ret;
+ }
+ 
++int mmap_file(struct file *file, struct vm_area_struct *vma)
++{
++      static const struct vm_operations_struct dummy_vm_ops = {};
++      int err = call_mmap(file, vma);
++
++      if (likely(!err))
++              return 0;
++
++      /*
++       * OK, we tried to call the file hook for mmap(), but an error
++       * arose. The mapping is in an inconsistent state and we must not invoke any further hooks on it.
++       */
++      vma->vm_ops = &dummy_vm_ops;
++
++      return err;
++}
++
++void vma_close(struct vm_area_struct *vma)
++{
++      static const struct vm_operations_struct dummy_vm_ops = {};
++
++      if (vma->vm_ops && vma->vm_ops->close) {
++              vma->vm_ops->close(vma);
++
++              /*
++               * The mapping is in an inconsistent state, and no further hooks
++               * may be invoked upon it.
++               */
++              vma->vm_ops = &dummy_vm_ops;
++      }
++}
++
+ #ifdef CONFIG_PRINTK
+ /**
+  * mem_dump_obj - Print available provenance information
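Both helpers implement one defensive rule: once a hook has run on a half-constructed or torn-down VMA, its vm_ops are replaced with empty dummy ops so no later path can call back into the driver. The effect, in a reduced sketch:

	/* Sketch: teardown becomes idempotent once hooks are neutralised. */
	static void demo_teardown(struct vm_area_struct *vma)
	{
		vma_close(vma);		/* runs ->close() at most once ...    */
		vma_close(vma);		/* ... a second call is now a no-op   */
	}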
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index f93f3e7a3d9052..789f7f4a09089b 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3846,8 +3846,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+       hci_dev_lock(hdev);
+       conn = hci_conn_hash_lookup_handle(hdev, handle);
+-      if (conn && hci_dev_test_flag(hdev, HCI_MGMT))
+-              mgmt_device_connected(hdev, conn, NULL, 0);
+       hci_dev_unlock(hdev);
+ 
+       if (conn) {
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7c1df481ebe9d5..b6fe5e15981f83 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5648,150 +5648,6 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
+       hci_dev_unlock(hdev);
+ }
+ 
+-#if IS_ENABLED(CONFIG_BT_HS)
+-static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
+-                                struct sk_buff *skb)
+-{
+-      struct hci_ev_channel_selected *ev = data;
+-      struct hci_conn *hcon;
+-
+-      bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
+-
+-      hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+-      if (!hcon)
+-              return;
+-
+-      amp_read_loc_assoc_final_data(hdev, hcon);
+-}
+-
+-static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
+-                                    struct sk_buff *skb)
+-{
+-      struct hci_ev_phy_link_complete *ev = data;
+-      struct hci_conn *hcon, *bredr_hcon;
+-
+-      bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
+-                 ev->status);
+-
+-      hci_dev_lock(hdev);
+-
+-      hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+-      if (!hcon)
+-              goto unlock;
+-
+-      if (!hcon->amp_mgr)
+-              goto unlock;
+-
+-      if (ev->status) {
+-              hci_conn_del(hcon);
+-              goto unlock;
+-      }
+-
+-      bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
+-
+-      hcon->state = BT_CONNECTED;
+-      bacpy(&hcon->dst, &bredr_hcon->dst);
+-
+-      hci_conn_hold(hcon);
+-      hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+-      hci_conn_drop(hcon);
+-
+-      hci_debugfs_create_conn(hcon);
+-      hci_conn_add_sysfs(hcon);
+-
+-      amp_physical_cfm(bredr_hcon, hcon);
+-
+-unlock:
+-      hci_dev_unlock(hdev);
+-}
+-
+-static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
+-                                   struct sk_buff *skb)
+-{
+-      struct hci_ev_logical_link_complete *ev = data;
+-      struct hci_conn *hcon;
+-      struct hci_chan *hchan;
+-      struct amp_mgr *mgr;
+-
+-      bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
+-                 le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
+-
+-      hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+-      if (!hcon)
+-              return;
+-
+-      /* Create AMP hchan */
+-      hchan = hci_chan_create(hcon);
+-      if (!hchan)
+-              return;
+-
+-      hchan->handle = le16_to_cpu(ev->handle);
+-      hchan->amp = true;
+-
+-      BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+-
+-      mgr = hcon->amp_mgr;
+-      if (mgr && mgr->bredr_chan) {
+-              struct l2cap_chan *bredr_chan = mgr->bredr_chan;
+-
+-              l2cap_chan_lock(bredr_chan);
+-
+-              bredr_chan->conn->mtu = hdev->block_mtu;
+-              l2cap_logical_cfm(bredr_chan, hchan, 0);
+-              hci_conn_hold(hcon);
+-
+-              l2cap_chan_unlock(bredr_chan);
+-      }
+-}
+-
+-static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
+-                                           struct sk_buff *skb)
+-{
+-      struct hci_ev_disconn_logical_link_complete *ev = data;
+-      struct hci_chan *hchan;
+-
+-      bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
+-                 le16_to_cpu(ev->handle), ev->status);
+-
+-      if (ev->status)
+-              return;
+-
+-      hci_dev_lock(hdev);
+-
+-      hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+-      if (!hchan || !hchan->amp)
+-              goto unlock;
+-
+-      amp_destroy_logical_link(hchan, ev->reason);
+-
+-unlock:
+-      hci_dev_unlock(hdev);
+-}
+-
+-static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
+-                                           struct sk_buff *skb)
+-{
+-      struct hci_ev_disconn_phy_link_complete *ev = data;
+-      struct hci_conn *hcon;
+-
+-      bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+-
+-      if (ev->status)
+-              return;
+-
+-      hci_dev_lock(hdev);
+-
+-      hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+-      if (hcon && hcon->type == AMP_LINK) {
+-              hcon->state = BT_CLOSED;
+-              hci_disconn_cfm(hcon, ev->reason);
+-              hci_conn_del(hcon);
+-      }
+-
+-      hci_dev_unlock(hdev);
+-}
+-#endif
+-
+ static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
+                               u8 bdaddr_type, bdaddr_t *local_rpa)
+ {
+@@ -7473,25 +7329,6 @@ static const struct hci_ev {
+       /* [0x3e = HCI_EV_LE_META] */
+       HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
+                     sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
+-#if IS_ENABLED(CONFIG_BT_HS)
+-      /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
+-      HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
+-             sizeof(struct hci_ev_phy_link_complete)),
+-      /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
+-      HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
+-             sizeof(struct hci_ev_channel_selected)),
+-      /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
+-      HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
+-             hci_disconn_loglink_complete_evt,
+-             sizeof(struct hci_ev_disconn_logical_link_complete)),
+-      /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
+-      HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
+-             sizeof(struct hci_ev_logical_link_complete)),
+-      /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
+-      HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
+-             hci_disconn_phylink_complete_evt,
+-             sizeof(struct hci_ev_disconn_phy_link_complete)),
+-#endif
+       /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
+       HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
+              sizeof(struct hci_ev_num_comp_blocks)),
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 27efca5dc7bbf7..ff15d5192768ab 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1189,7 +1189,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+                              sockptr_t optval, unsigned int optlen)
+ {
+       struct sock *sk = sock->sk;
+-      int len, err = 0;
++      int err = 0;
+       struct bt_iso_qos qos;
+       u32 opt;
+ 
+@@ -1204,10 +1204,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+                       break;
+               }
+ 
+-              if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-                      err = -EFAULT;
++              err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
++              if (err)
+                       break;
+-              }
+ 
+               if (opt)
+                       set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+@@ -1222,18 +1221,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+                       break;
+               }
+ 
+-              len = min_t(unsigned int, sizeof(qos), optlen);
+-              if (len != sizeof(qos)) {
+-                      err = -EINVAL;
+-                      break;
+-              }
+-
+-              memset(&qos, 0, sizeof(qos));
+-
+-              if (copy_from_sockptr(&qos, optval, len)) {
+-                      err = -EFAULT;
++              err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen);
++              if (err)
+                       break;
+-              }
+ 
+               if (!check_qos(&qos)) {
+                       err = -EINVAL;
+@@ -1252,18 +1242,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
+               }
+ 
+               if (optlen > sizeof(iso_pi(sk)->base)) {
+-                      err = -EOVERFLOW;
++                      err = -EINVAL;
+                       break;
+               }
+ 
+-              len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen);
+-
+-              if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) {
+-                      err = -EFAULT;
++              err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval,
++                                         optlen);
++              if (err)
+                       break;
+-              }
+ 
+-              iso_pi(sk)->base_len = len;
++              iso_pi(sk)->base_len = optlen;
+ 
+               break;
+ 
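The iso.c conversions use bt_copy_from_sockptr(), Bluetooth's counterpart of copy_safe_from_sockptr() above; it is assumed here to reject an optlen smaller than the destination with -EINVAL and to return -EFAULT on a failed copy. The resulting handler shape, as a sketch:

	/* Sketch of the post-conversion pattern for a u32 socket option;
	 * semantics of bt_copy_from_sockptr() assumed as described above. */
	static int demo_bt_set_u32(sockptr_t optval, unsigned int optlen, u32 *out)
	{
		return bt_copy_from_sockptr(out, sizeof(*out), optval, optlen);
	}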
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 49e8156f5388b9..3fd7de56a30fca 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -525,15 +525,13 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
+ }
+ 
+ static struct mptcp_pm_addr_entry *
+-__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
+-            bool lookup_by_id)
++__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
+ {
+       struct mptcp_pm_addr_entry *entry;
+ 
+-      list_for_each_entry(entry, &pernet->local_addr_list, list) {
+-              if ((!lookup_by_id &&
+-                   mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
+-                  (lookup_by_id && entry->addr.id == info->id))
++      list_for_each_entry_rcu(entry, &pernet->local_addr_list, list,
++                              lockdep_is_held(&pernet->lock)) {
++              if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
+                       return entry;
+       }
+       return NULL;
+@@ -564,7 +562,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 
+               mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
+               rcu_read_lock();
+-              entry = __lookup_addr(pernet, &mpc_addr, false);
++              entry = __lookup_addr(pernet, &mpc_addr);
+               if (entry) {
+                       __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
+                       msk->mpc_endpoint_id = entry->addr.id;
+@@ -2081,7 +2079,8 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
+                                                   token, &addr, &remote, bkup);
+ 
+       spin_lock_bh(&pernet->lock);
+-      entry = __lookup_addr(pernet, &addr.addr, lookup_by_id);
++      entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
++                             __lookup_addr(pernet, &addr.addr);
+       if (!entry) {
+               spin_unlock_bh(&pernet->lock);
+               return -EINVAL;
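__lookup_addr() now walks the list with list_for_each_entry_rcu() plus a lockdep_is_held() condition, making the one helper legal both under rcu_read_lock() and under pernet->lock. The dual-context idiom with a hypothetical list:

	struct demo_entry {
		int		 id;
		struct list_head list;
	};

	/* Callers hold either rcu_read_lock() or *lock; lockdep accepts both. */
	static struct demo_entry *demo_lookup(struct list_head *head, int id,
					      spinlock_t *lock)
	{
		struct demo_entry *e;

		list_for_each_entry_rcu(e, head, list, lockdep_is_held(lock))
			if (e->id == id)
				return e;
		return NULL;
	}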
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index 748e3876ec6d33..9016f8900c1928 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -106,22 +106,29 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
+       return -EINVAL;
+ }
+ 
++static struct mptcp_pm_addr_entry *
++mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id)
++{
++      struct mptcp_pm_addr_entry *entry;
++
++      list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
++              if (entry->addr.id == id)
++                      return entry;
++      }
++      return NULL;
++}
++
+ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
+                                                  unsigned int id,
+                                                  u8 *flags, int *ifindex)
+ {
+-      struct mptcp_pm_addr_entry *entry, *match = NULL;
++      struct mptcp_pm_addr_entry *match;
+ 
+       *flags = 0;
+       *ifindex = 0;
+ 
+       spin_lock_bh(&msk->pm.lock);
+-      list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+-              if (id == entry->addr.id) {
+-                      match = entry;
+-                      break;
+-              }
+-      }
++      match = mptcp_userspace_pm_lookup_addr_by_id(msk, id);
+       spin_unlock_bh(&msk->pm.lock);
+       if (match) {
+               *flags = match->flags;
+@@ -185,6 +192,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+       struct mptcp_pm_addr_entry addr_val;
+       struct mptcp_sock *msk;
+       int err = -EINVAL;
++      struct sock *sk;
+       u32 token_val;
+ 
+       if (!addr || !token) {
+@@ -200,6 +208,8 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+               return err;
+       }
+ 
++      sk = (struct sock *)msk;
++
+       if (!mptcp_pm_is_userspace(msk)) {
+               GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+               goto announce_err;
+@@ -223,7 +233,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+               goto announce_err;
+       }
+ 
+-      lock_sock((struct sock *)msk);
++      lock_sock(sk);
+       spin_lock_bh(&msk->pm.lock);
+ 
+       if (mptcp_pm_alloc_anno_list(msk, &addr_val.addr)) {
+@@ -233,11 +243,11 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+       }
+ 
+       spin_unlock_bh(&msk->pm.lock);
+-      release_sock((struct sock *)msk);
++      release_sock(sk);
+ 
+       err = 0;
+  announce_err:
+-      sock_put((struct sock *)msk);
++      sock_put(sk);
+       return err;
+ }
+ 
+@@ -279,11 +289,12 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ {
+       struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+       struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
+-      struct mptcp_pm_addr_entry *match = NULL;
++      struct mptcp_pm_addr_entry *match;
+       struct mptcp_pm_addr_entry *entry;
+       struct mptcp_sock *msk;
+       LIST_HEAD(free_list);
+       int err = -EINVAL;
++      struct sock *sk;
+       u32 token_val;
+       u8 id_val;
+ 
+@@ -301,6 +312,8 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+               return err;
+       }
+ 
++      sk = (struct sock *)msk;
++
+       if (!mptcp_pm_is_userspace(msk)) {
+               GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+               goto remove_err;
+@@ -311,34 +324,31 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+               goto remove_err;
+       }
+ 
+-      lock_sock((struct sock *)msk);
+-
+-      list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+-              if (entry->addr.id == id_val) {
+-                      match = entry;
+-                      break;
+-              }
+-      }
++      lock_sock(sk);
+ 
++      spin_lock_bh(&msk->pm.lock);
++      match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val);
+       if (!match) {
+               GENL_SET_ERR_MSG(info, "address with specified id not found");
+-              release_sock((struct sock *)msk);
++              spin_unlock_bh(&msk->pm.lock);
++              release_sock(sk);
+               goto remove_err;
+       }
+ 
+       list_move(&match->list, &free_list);
++      spin_unlock_bh(&msk->pm.lock);
+ 
+       mptcp_pm_remove_addrs(msk, &free_list);
+ 
+-      release_sock((struct sock *)msk);
++      release_sock(sk);
+ 
+       list_for_each_entry_safe(match, entry, &free_list, list) {
+-              sock_kfree_s((struct sock *)msk, match, sizeof(*match));
++              sock_kfree_s(sk, match, sizeof(*match));
+       }
+ 
+       err = 0;
+  remove_err:
+-      sock_put((struct sock *)msk);
++      sock_put(sk);
+       return err;
+ }
+ 
+@@ -558,8 +568,10 @@ int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
+                                struct mptcp_pm_addr_entry *loc,
+                                struct mptcp_pm_addr_entry *rem, u8 bkup)
+ {
++      struct mptcp_pm_addr_entry *entry;
+       struct mptcp_sock *msk;
+       int ret = -EINVAL;
++      struct sock *sk;
+       u32 token_val;
+ 
+       token_val = nla_get_u32(token);
+@@ -568,6 +580,8 @@ int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
+       if (!msk)
+               return ret;
+ 
++      sk = (struct sock *)msk;
++
+       if (!mptcp_pm_is_userspace(msk))
+               goto set_flags_err;
+ 
+@@ -575,11 +589,22 @@ int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
+           rem->addr.family == AF_UNSPEC)
+               goto set_flags_err;
+ 
+-      lock_sock((struct sock *)msk);
++      spin_lock_bh(&msk->pm.lock);
++      list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
++              if (mptcp_addresses_equal(&entry->addr, &loc->addr, false)) {
++                      if (bkup)
++                              entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
++                      else
++                              entry->flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP;
++              }
++      }
++      spin_unlock_bh(&msk->pm.lock);
++
++      lock_sock(sk);
+       ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup);
+-      release_sock((struct sock *)msk);
++      release_sock(sk);
+ 
+ set_flags_err:
+-      sock_put((struct sock *)msk);
++      sock_put(sk);
+       return ret;
+ }
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index d68e93dab88c3e..1acd4e37a0ea6c 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2057,7 +2057,8 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
+                               slow = lock_sock_fast(ssk);
+                               WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
+                               tcp_sk(ssk)->window_clamp = window_clamp;
+-                              tcp_cleanup_rbuf(ssk, 1);
++                              if (tcp_can_send_ack(ssk))
++                                      tcp_cleanup_rbuf(ssk, 1);
+                               unlock_sock_fast(ssk, slow);
+                       }
+               }
+@@ -2180,7 +2181,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+               cmsg_flags = MPTCP_CMSG_INQ;
+ 
+       while (copied < len) {
+-              int bytes_read;
++              int err, bytes_read;
+ 
+               bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
+               if (unlikely(bytes_read < 0)) {
+@@ -2245,9 +2246,16 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+               }
+ 
+               pr_debug("block timeout %ld\n", timeo);
+-              sk_wait_data(sk, &timeo, NULL);
++              mptcp_rcv_space_adjust(msk, copied);
++              err = sk_wait_data(sk, &timeo, NULL);
++              if (err < 0) {
++                      err = copied ? : err;
++                      goto out_err;
++              }
+       }
+ 
++      mptcp_rcv_space_adjust(msk, copied);
++
+ out_err:
+       if (cmsg_flags && copied >= 0) {
+               if (cmsg_flags & MPTCP_CMSG_TS)
+@@ -2263,8 +2271,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+       pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
+                msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
+                skb_queue_empty(&msk->receive_queue), copied);
+-      if (!(flags & MSG_PEEK))
+-              mptcp_rcv_space_adjust(msk, copied);
+ 
+       release_sock(sk);
+       return copied;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 17a1b731a76b1d..18e37b32a5d610 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1382,18 +1382,18 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
+               sched = NULL;
+       }
+ 
+-      /* Bind the ct retriever */
+-      RCU_INIT_POINTER(svc->pe, pe);
+-      pe = NULL;
+-
+       /* Update the virtual service counters */
+       if (svc->port == FTPPORT)
+               atomic_inc(&ipvs->ftpsvc_counter);
+       else if (svc->port == 0)
+               atomic_inc(&ipvs->nullsvc_counter);
+-      if (svc->pe && svc->pe->conn_out)
++      if (pe && pe->conn_out)
+               atomic_inc(&ipvs->conn_out_counter);
+ 
++      /* Bind the ct retriever */
++      RCU_INIT_POINTER(svc->pe, pe);
++      pe = NULL;
++
+       ip_vs_start_estimator(ipvs, &svc->stats);
+ 
+       /* Count only IPv4 services for old get/setsockopt interface */
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 9eb87f35bc65e2..8a74847dacaf15 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -393,15 +393,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+ 
+ static void netlink_sock_destruct(struct sock *sk)
+ {
+-      struct netlink_sock *nlk = nlk_sk(sk);
+-
+-      if (nlk->cb_running) {
+-              if (nlk->cb.done)
+-                      nlk->cb.done(&nlk->cb);
+-              module_put(nlk->cb.module);
+-              kfree_skb(nlk->cb.skb);
+-      }
+-
+       skb_queue_purge(&sk->sk_receive_queue);
+ 
+       if (!sock_flag(sk, SOCK_DEAD)) {
+@@ -414,14 +405,6 @@ static void netlink_sock_destruct(struct sock *sk)
+       WARN_ON(nlk_sk(sk)->groups);
+ }
+ 
+-static void netlink_sock_destruct_work(struct work_struct *work)
+-{
+-      struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+-                                              work);
+-
+-      sk_free(&nlk->sk);
+-}
+-
+ /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
+  * SMP. Look, when several writers sleep and reader wakes them up, all but one
+  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
+@@ -736,12 +719,6 @@ static void deferred_put_nlk_sk(struct rcu_head *head)
+       if (!refcount_dec_and_test(&sk->sk_refcnt))
+               return;
+ 
+-      if (nlk->cb_running && nlk->cb.done) {
+-              INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+-              schedule_work(&nlk->work);
+-              return;
+-      }
+-
+       sk_free(sk);
+ }
+ 
+@@ -791,6 +768,14 @@ static int netlink_release(struct socket *sock)
+                               NETLINK_URELEASE, &n);
+       }
+ 
++      /* Terminate any outstanding dump */
++      if (nlk->cb_running) {
++              if (nlk->cb.done)
++                      nlk->cb.done(&nlk->cb);
++              module_put(nlk->cb.module);
++              kfree_skb(nlk->cb.skb);
++      }
++
+       module_put(nlk->module);
+ 
+       if (netlink_is_kernel(sk)) {
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index b30b8fc760f71a..aa430e4d58d805 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -4,7 +4,6 @@
+ 
+ #include <linux/rhashtable.h>
+ #include <linux/atomic.h>
+-#include <linux/workqueue.h>
+ #include <net/sock.h>
+ 
+ /* flags */
+@@ -48,7 +47,6 @@ struct netlink_sock {
+ 
+       struct rhash_head       node;
+       struct rcu_head         rcu;
+-      struct work_struct      work;
+ };
+ 
+ static inline struct netlink_sock *nlk_sk(struct sock *sk)
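+
+(Net effect of the netlink hunks: an outstanding dump is now terminated in
+netlink_release(), which runs in process context with the socket still fully
+valid, rather than in the destructor path. The old arrangement needed the
+work_struct detour because deferred_put_nlk_sk() runs as an RCU callback,
+where a sleeping ->done() would be illegal; with the cleanup moved earlier,
+the workqueue bounce can go away entirely. In miniature, as an illustrative
+skeleton rather than new API:
+
+    /* release(): process context - sleeping cleanup is fine here */
+    static int example_release(struct sock *sk)
+    {
+            terminate_dump(sk);      /* may call a sleeping ->done() */
+            call_rcu(&nlk_sk(sk)->rcu, example_deferred_put);
+            return 0;
+    }
+
+    /* RCU callback: atomic context - only non-sleeping teardown */
+    static void example_deferred_put(struct rcu_head *head)
+    {
+            sk_free(&container_of(head, struct netlink_sock, rcu)->sk);
+    }
+)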
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 645677f84dba25..cd0fd26196b8b6 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
+                       break;
+               }
+ 
+-              if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-                      err = -EFAULT;
++              err = copy_safe_from_sockptr(&opt, sizeof(opt),
++                                           optval, optlen);
++              if (err)
+                       break;
+-              }
+ 
+               if (opt > LLCP_MAX_RW) {
+                       err = -EINVAL;
+@@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
+                       break;
+               }
+ 
+-              if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+-                      err = -EFAULT;
++              err = copy_safe_from_sockptr(&opt, sizeof(opt),
++                                           optval, optlen);
++              if (err)
+                       break;
+-              }
+ 
+               if (opt > LLCP_MAX_MIUX) {
+                       err = -EINVAL;
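+
+(copy_from_sockptr() with a hard-coded sizeof(u32) copies four bytes no
+matter what optlen the caller supplied, so a short option could read past the
+user buffer. copy_safe_from_sockptr() validates the length first; its
+behaviour is approximately the following, see include/linux/sockptr.h for
+the authoritative definition:
+
+    static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
+                                             sockptr_t optval,
+                                             unsigned int optlen)
+    {
+            if (optlen < ksize)
+                    return -EINVAL;
+            return copy_from_sockptr(dst, optval, ksize); /* -EFAULT on fault */
+    }
+)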
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 04448bfb4d3db0..e87d79d043d54c 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -70,7 +70,7 @@ struct tc_u_hnode {
+       struct tc_u_hnode __rcu *next;
+       u32                     handle;
+       u32                     prio;
+-      int                     refcnt;
++      refcount_t              refcnt;
+       unsigned int            divisor;
+       struct idr              handle_idr;
+       bool                    is_root;
+@@ -85,12 +85,22 @@ struct tc_u_hnode {
+ struct tc_u_common {
+       struct tc_u_hnode __rcu *hlist;
+       void                    *ptr;
+-      int                     refcnt;
++      refcount_t              refcnt;
+       struct idr              handle_idr;
+       struct hlist_node       hnode;
+       long                    knodes;
+ };
+ 
++static u32 handle2id(u32 h)
++{
++      return ((h & 0x80000000) ? ((h >> 20) & 0x7FF) : h);
++}
++
++static u32 id2handle(u32 id)
++{
++      return (id | 0x800U) << 20;
++}
++
+ static inline unsigned int u32_hash_fold(__be32 key,
+                                        const struct tc_u32_sel *sel,
+                                        u8 fshift)
+@@ -308,7 +318,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
+       int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
+       if (id < 0)
+               return 0;
+-      return (id | 0x800U) << 20;
++      return id2handle(id);
+ }
+ 
+ static struct hlist_head *tc_u_common_hash;
+@@ -357,8 +367,8 @@ static int u32_init(struct tcf_proto *tp)
+       if (root_ht == NULL)
+               return -ENOBUFS;
+ 
+-      root_ht->refcnt++;
+-      root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
++      refcount_set(&root_ht->refcnt, 1);
++      root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : id2handle(0);
+       root_ht->prio = tp->prio;
+       root_ht->is_root = true;
+       idr_init(&root_ht->handle_idr);
+@@ -369,18 +379,20 @@ static int u32_init(struct tcf_proto *tp)
+                       kfree(root_ht);
+                       return -ENOBUFS;
+               }
++              refcount_set(&tp_c->refcnt, 1);
+               tp_c->ptr = key;
+               INIT_HLIST_NODE(&tp_c->hnode);
+               idr_init(&tp_c->handle_idr);
+ 
+               hlist_add_head(&tp_c->hnode, tc_u_hash(key));
++      } else {
++              refcount_inc(&tp_c->refcnt);
+       }
+ 
+-      tp_c->refcnt++;
+       RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
+       rcu_assign_pointer(tp_c->hlist, root_ht);
+ 
+-      root_ht->refcnt++;
++      /* root_ht must be destroyed when tcf_proto is destroyed */
+       rcu_assign_pointer(tp->root, root_ht);
+       tp->data = tp_c;
+       return 0;
+@@ -391,7 +403,7 @@ static void __u32_destroy_key(struct tc_u_knode *n)
+       struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+ 
+       tcf_exts_destroy(&n->exts);
+-      if (ht && --ht->refcnt == 0)
++      if (ht && refcount_dec_and_test(&ht->refcnt))
+               kfree(ht);
+       kfree(n);
+ }
+@@ -599,8 +611,6 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+       struct tc_u_hnode __rcu **hn;
+       struct tc_u_hnode *phn;
+ 
+-      WARN_ON(--ht->refcnt);
+-
+       u32_clear_hnode(tp, ht, extack);
+ 
+       hn = &tp_c->hlist;
+@@ -610,7 +620,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+               if (phn == ht) {
+                       u32_clear_hw_hnode(tp, ht, extack);
+                       idr_destroy(&ht->handle_idr);
+-                      idr_remove(&tp_c->handle_idr, ht->handle);
++                      idr_remove(&tp_c->handle_idr, handle2id(ht->handle));
+                       RCU_INIT_POINTER(*hn, ht->next);
+                       kfree_rcu(ht, rcu);
+                       return 0;
+@@ -628,10 +638,10 @@ static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
+ 
+       WARN_ON(root_ht == NULL);
+ 
+-      if (root_ht && --root_ht->refcnt == 1)
++      if (root_ht && refcount_dec_and_test(&root_ht->refcnt))
+               u32_destroy_hnode(tp, root_ht, extack);
+ 
+-      if (--tp_c->refcnt == 0) {
++      if (refcount_dec_and_test(&tp_c->refcnt)) {
+               struct tc_u_hnode *ht;
+ 
+               hlist_del(&tp_c->hnode);
+@@ -643,7 +653,7 @@ static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
+                       /* u32_destroy_key() will later free ht for us, if it's
+                        * still referenced by some knode
+                        */
+-                      if (--ht->refcnt == 0)
++                      if (refcount_dec_and_test(&ht->refcnt))
+                               kfree_rcu(ht, rcu);
+               }
+ 
+@@ -672,7 +682,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
+               return -EINVAL;
+       }
+ 
+-      if (ht->refcnt == 1) {
++      if (refcount_dec_if_one(&ht->refcnt)) {
+               u32_destroy_hnode(tp, ht, extack);
+       } else {
+               NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
+@@ -680,7 +690,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
+       }
+ 
+ out:
+-      *last = tp_c->refcnt == 1 && tp_c->knodes == 0;
++      *last = refcount_read(&tp_c->refcnt) == 1 && tp_c->knodes == 0;
+       return ret;
+ }
+ 
+@@ -764,14 +774,14 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+                               NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
+                               return -EINVAL;
+                       }
+-                      ht_down->refcnt++;
++                      refcount_inc(&ht_down->refcnt);
+               }
+ 
+               ht_old = rtnl_dereference(n->ht_down);
+               rcu_assign_pointer(n->ht_down, ht_down);
+ 
+               if (ht_old)
+-                      ht_old->refcnt--;
++                      refcount_dec(&ht_old->refcnt);
+       }
+ 
+       if (ifindex >= 0)
+@@ -850,7 +860,7 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
+ 
+       /* bump reference count as long as we hold pointer to structure */
+       if (ht)
+-              ht->refcnt++;
++              refcount_inc(&ht->refcnt);
+ 
+       return new;
+ }
+@@ -930,7 +940,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 
+                               ht_old = rtnl_dereference(n->ht_down);
+                               if (ht_old)
+-                                      ht_old->refcnt++;
++                                      refcount_inc(&ht_old->refcnt);
+                       }
+                       __u32_destroy_key(new);
+                       return err;
+@@ -978,7 +988,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+                               return err;
+                       }
+               }
+-              ht->refcnt = 1;
++              refcount_set(&ht->refcnt, 1);
+               ht->divisor = divisor;
+               ht->handle = handle;
+               ht->prio = tp->prio;
+@@ -987,7 +997,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 
+               err = u32_replace_hw_hnode(tp, ht, userflags, extack);
+               if (err) {
+-                      idr_remove(&tp_c->handle_idr, handle);
++                      idr_remove(&tp_c->handle_idr, handle2id(handle));
+                       kfree(ht);
+                       return err;
+               }
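+
+(Besides the refcount_t conversion, which trades bare integer ops for
+saturating, warn-on-misuse atomics, the new helpers document the hash-table
+handle layout: the IDR hands out ids below 0x7FF, id2handle() ORs in 0x800
+(which lands at bit 31 after the shift) and moves the id into bits 20-30,
+and handle2id() inverts that, passing non-root handles through untouched.
+The previous code removed IDR entries keyed by the raw handle, which never
+matched the stored id, so the entries leaked. A standalone round-trip check,
+plain userspace C mirroring the two helpers above:
+
+    #include <assert.h>
+    #include <stdint.h>
+
+    static uint32_t id2handle(uint32_t id) { return (id | 0x800U) << 20; }
+    static uint32_t handle2id(uint32_t h)
+    {
+            return (h & 0x80000000) ? ((h >> 20) & 0x7FF) : h;
+    }
+
+    int main(void)
+    {
+            for (uint32_t id = 0; id < 0x7FF; id++)
+                    assert(handle2id(id2handle(id)) == id);  /* lossless */
+            return 0;
+    }
+)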
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 1d5cdc987abde4..62219f23f76ab1 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -915,11 +915,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+               list_for_each_entry(entry, &new->entries, list)
+                       cycle = ktime_add_ns(cycle, entry->interval);
+ 
+-              if (!cycle) {
+-                      NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
+-                      return -EINVAL;
+-              }
+-
+               if (cycle < 0 || cycle > INT_MAX) {
+                       NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
+                       return -EINVAL;
+@@ -928,6 +923,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+               new->cycle_time = cycle;
+       }
+ 
++      if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
++              NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
++              return -EINVAL;
++      }
++
+       return 0;
+ }
+ 
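+
+(The removed `!cycle` test is subsumed: the new bound requires the cycle to
+be long enough for every schedule entry to pass at least one minimum-size
+Ethernet frame (ETH_ZLEN, 60 bytes), which also rejects zero.
+Back-of-the-envelope, assuming a 1 Gb/s link where length_to_duration()
+works out to 8 ns per byte:
+
+    /* sketch of the bound; the kernel derives ns-per-byte from link speed */
+    #define ETH_ZLEN 60
+    static unsigned long long min_cycle_ns(unsigned int num_entries,
+                                           unsigned int ns_per_byte)
+    {
+            return (unsigned long long)num_entries * ETH_ZLEN * ns_per_byte;
+    }
+    /* min_cycle_ns(2, 8) == 960, so the selftest's cycle-time of 100 fails */
+)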
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index b22dc7bed21824..3bc573cbf8a6e2 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1220,6 +1220,14 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb,
+               return -ENOMEM;
+       }
+ 
++      /* __vsock_release() might have already flushed accept_queue.
++       * Subsequent enqueues would lead to a memory leak.
++       */
++      if (sk->sk_shutdown == SHUTDOWN_MASK) {
++              virtio_transport_reset_no_sock(t, skb);
++              return -ESHUTDOWN;
++      }
++
+       child = vsock_create_connected(sk);
+       if (!child) {
+               virtio_transport_reset_no_sock(t, skb);
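+
+(SHUTDOWN_MASK is RCV_SHUTDOWN | SEND_SHUTDOWN, and __vsock_release() sets
+it on the listener after draining accept_queue; a child enqueued after that
+drain would be unreachable and leak. The race being closed, as a timeline in
+comment form:
+
+    /*
+     *  CPU0: close(listener)           CPU1: rx path
+     *  ----------------------------    --------------------------------
+     *  __vsock_release()
+     *    flush accept_queue
+     *    sk_shutdown = SHUTDOWN_MASK
+     *                                  virtio_transport_recv_listen()
+     *                                    sk_shutdown == SHUTDOWN_MASK?
+     *                                    -> reset peer, -ESHUTDOWN
+     *                                    (previously: enqueue a child
+     *                                     nobody would ever accept)
+     */
+)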
+diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
+index 09a92ea963f98b..c8e75888a9c200 100755
+--- a/samples/pktgen/pktgen_sample01_simple.sh
++++ b/samples/pktgen/pktgen_sample01_simple.sh
+@@ -72,7 +72,7 @@ if [ -n "$DST_PORT" ]; then
+     pg_set $DEV "udp_dst_max $UDP_DST_MAX"
+ fi
+ 
+-[ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM"
++[ ! -z "$UDP_CSUM" ] && pg_set $DEV "flag UDPCSUM"
+ 
+ # Setup random UDP port src range
+ pg_set $DEV "flag UDPSRC_RND"
+diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
+index 41ec31debe8709..bf5267bcd7fe40 100644
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -318,15 +318,21 @@ static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,
+                                     hash_algo_name[hash_algo]);
+       }
+ 
+-      if (digest)
++      if (digest) {
+               memcpy(buffer + offset, digest, digestsize);
+-      else
++      } else {
+               /*
+                * If digest is NULL, the event being recorded is a violation.
+                * Make room for the digest by increasing the offset by the
+-               * hash algorithm digest size.
++               * hash algorithm digest size. If the hash algorithm is not
++               * specified increase the offset by IMA_DIGEST_SIZE which
++               * fits SHA1 or MD5
+                */
+-              offset += hash_digest_size[hash_algo];
++              if (hash_algo < HASH_ALGO__LAST)
++                      offset += hash_digest_size[hash_algo];
++              else
++                      offset += IMA_DIGEST_SIZE;
++      }
+ 
+       return ima_write_template_field_data(buffer, offset + digestsize,
+                                            fmt, field_data);
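+
+(hash_algo can legitimately be HASH_ALGO__LAST here when the event carries
+no recognizable algorithm, and hash_digest_size[] has exactly HASH_ALGO__LAST
+entries, so the old unconditional lookup indexed one past the table. The
+fallback of IMA_DIGEST_SIZE, 20 bytes, the SHA1/MD5 digest size named in the
+comment, keeps the violation record well-formed. The defensive idiom in
+isolation:
+
+    /* sketch: never index a lookup table with its sentinel value */
+    static size_t violation_digest_len(enum hash_algo algo)
+    {
+            if (algo < HASH_ALGO__LAST)
+                    return hash_digest_size[algo];
+            return IMA_DIGEST_SIZE;  /* unspecified: SHA1/MD5-sized hole */
+    }
+)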
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1b5527cb59a938..a56ec9bd90fae8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9840,6 +9840,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++      SND_PCI_QUIRK(0x103c, 0x8b5f, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+@@ -10872,6 +10873,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+               {0x1a, 0x40000000}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
+               {0x19, 0x40000000}),
++      SND_HDA_PIN_QUIRK(0x10ec0255, 0x1558, "Clevo", ALC2XX_FIXUP_HEADSET_MIC,
++              {0x19, 0x40000000}),
+       {}
+ };
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+index 08d4861c2e7826..d04fed83332cc0 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+@@ -132,6 +132,28 @@
+             "echo \"1\" > /sys/bus/netdevsim/del_device"
+         ]
+     },
++    {
++        "id": "831f",
++        "name": "Add taprio Qdisc with too short cycle-time",
++        "category": [
++            "qdisc",
++            "taprio"
++        ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
++        "setup": [
++            "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
++        ],
++        "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 
2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 
clockid CLOCK_TAI",
++        "expExitCode": "2",
++        "verifyCmd": "$TC qdisc show dev $ETH",
++        "matchPattern": "qdisc taprio 1: root refcnt",
++        "matchCount": "0",
++        "teardown": [
++            "echo \"1\" > /sys/bus/netdevsim/del_device"
++        ]
++    },
+     {
+         "id": "3e1e",
+         "name": "Add taprio Qdisc with an invalid cycle-time",
