commit:     7347f4b54f358562d2abfb3b7242983e4bf8beb0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov 10 17:59:10 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov 10 17:59:10 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7347f4b5

Linux patch 5.4.224

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1223_linux-5.4.224.patch | 2502 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2506 insertions(+)

diff --git a/0000_README b/0000_README
index 80fbb0b9..60c86d23 100644
--- a/0000_README
+++ b/0000_README
@@ -935,6 +935,10 @@ Patch:  1222_linux-5.4.223.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.223
 
+Patch:  1223_linux-5.4.224.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.224
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1223_linux-5.4.224.patch b/1223_linux-5.4.224.patch
new file mode 100644
index 00000000..df738e8a
--- /dev/null
+++ b/1223_linux-5.4.224.patch
@@ -0,0 +1,2502 @@
+diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
+index 3f3d1b960fe79..931f3b71745a6 100644
+--- a/Documentation/trace/histogram.rst
++++ b/Documentation/trace/histogram.rst
+@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
+   will use the event's kernel stacktrace as the key.  The keywords
+   'keys' or 'key' can be used to specify keys, and the keywords
+   'values', 'vals', or 'val' can be used to specify values.  Compound
+-  keys consisting of up to two fields can be specified by the 'keys'
++  keys consisting of up to three fields can be specified by the 'keys'
+   keyword.  Hashing a compound key produces a unique entry in the
+   table for each unique combination of component keys, and can be
+   useful for providing more fine-grained summaries of event data.
+diff --git a/Makefile b/Makefile
+index f32470518d3f5..3d46653e4b1cb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 223
++SUBLEVEL = 224
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
+index 9d3d7737c58b1..a005ebc547793 100644
+--- a/arch/parisc/include/asm/hardware.h
++++ b/arch/parisc/include/asm/hardware.h
+@@ -10,12 +10,12 @@
+ #define SVERSION_ANY_ID               PA_SVERSION_ANY_ID
+ 
+ struct hp_hardware {
+-      unsigned short  hw_type:5;      /* HPHW_xxx */
+-      unsigned short  hversion;
+-      unsigned long   sversion:28;
+-      unsigned short  opt;
+-      const char      name[80];       /* The hardware description */
+-};
++      unsigned int    hw_type:8;      /* HPHW_xxx */
++      unsigned int    hversion:12;
++      unsigned int    sversion:12;
++      unsigned char   opt;
++      unsigned char   name[59];       /* The hardware description */
++} __packed;
+ 
+ struct parisc_device;
+ 
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index 516f3891e793f..a1476673062e6 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -882,15 +882,13 @@ void __init walk_central_bus(void)
+                       &root);
+ }
+ 
+-static void print_parisc_device(struct parisc_device *dev)
++static __init void print_parisc_device(struct parisc_device *dev)
+ {
+-      char hw_path[64];
+-      static int count;
++      static int count __initdata;
+ 
+-      print_pa_hwpath(dev, hw_path);
+-      pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+-              ++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
+-              dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
++      pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
++              ++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
++              dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
+ 
+       if (dev->num_addrs) {
+               int k;
+@@ -1079,7 +1077,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
+ 
+ 
+ 
+-static int print_one_device(struct device * dev, void * data)
++static __init int print_one_device(struct device * dev, void * data)
+ {
+       struct parisc_device * pdev = to_parisc_device(dev);
+ 
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f2976204e8b5d..8ae7b8b4d4607 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4009,6 +4009,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             5, 0x00000000),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             6, 0x00000000),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             7, 0x00000000),
++      INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,            11, 0x00000000),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L,             3, 0x0000007c),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE,               3, 0x0000007c),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,              9, 0x0000004e),
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 5965d341350ca..6da90c6f33903 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -852,8 +852,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = {
+       INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),  /* SLOTS */
+ 
+      INTEL_PLD_CONSTRAINT(0x1cd, 0xff),                      /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+-      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),    /* MEM_INST_RETIRED.LOAD */
+-      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),    /* MEM_INST_RETIRED.STORE */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),   /* MEM_INST_RETIRED.STLB_MISS_LOADS */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),   /* MEM_INST_RETIRED.STLB_MISS_STORES */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf),   /* MEM_INST_RETIRED.LOCK_LOADS */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf),   /* MEM_INST_RETIRED.SPLIT_LOADS */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf),   /* MEM_INST_RETIRED.SPLIT_STORES */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf),   /* MEM_INST_RETIRED.ALL_LOADS */
++      INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf),   /* MEM_INST_RETIRED.ALL_STORES */
+ 
+       INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
+ 
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 62c7f771a7cf8..db38386674661 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -759,6 +759,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
+                       g_phys_as = phys_as;
+ 
+               entry->eax = g_phys_as | (virt_as << 8);
++              entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
+               entry->edx = 0;
+               entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+               cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+@@ -791,6 +792,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
+               entry->ecx = entry->edx = 0;
+               break;
+       case 0x8000001a:
++              entry->eax &= GENMASK(2, 0);
++              entry->ebx = entry->ecx = entry->edx = 0;
++              break;
+       case 0x8000001e:
+               break;
+       /*Add support for Centaur's CPUID instruction*/
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 1a1c9c9f3a31f..1a9fa29038526 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -776,8 +776,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
+                          ctxt->mode, linear);
+ }
+ 
+-static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+-                           enum x86emul_mode mode)
++static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+       ulong linear;
+       int rc;
+@@ -787,41 +786,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+ 
+       if (ctxt->op_bytes != sizeof(unsigned long))
+               addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+-      rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
++      rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+       if (rc == X86EMUL_CONTINUE)
+               ctxt->_eip = addr.ea;
+       return rc;
+ }
+ 
++static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
++{
++      u64 efer;
++      struct desc_struct cs;
++      u16 selector;
++      u32 base3;
++
++      ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
++
++      if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
++              /* Real mode. cpu must not have long mode active */
++              if (efer & EFER_LMA)
++                      return X86EMUL_UNHANDLEABLE;
++              ctxt->mode = X86EMUL_MODE_REAL;
++              return X86EMUL_CONTINUE;
++      }
++
++      if (ctxt->eflags & X86_EFLAGS_VM) {
++              /* Protected/VM86 mode. cpu must not have long mode active */
++              if (efer & EFER_LMA)
++                      return X86EMUL_UNHANDLEABLE;
++              ctxt->mode = X86EMUL_MODE_VM86;
++              return X86EMUL_CONTINUE;
++      }
++
++      if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
++              return X86EMUL_UNHANDLEABLE;
++
++      if (efer & EFER_LMA) {
++              if (cs.l) {
++                      /* Proper long mode */
++                      ctxt->mode = X86EMUL_MODE_PROT64;
++              } else if (cs.d) {
++                      /* 32 bit compatibility mode*/
++                      ctxt->mode = X86EMUL_MODE_PROT32;
++              } else {
++                      ctxt->mode = X86EMUL_MODE_PROT16;
++              }
++      } else {
++              /* Legacy 32 bit / 16 bit mode */
++              ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
++      }
++
++      return X86EMUL_CONTINUE;
++}
++
+ static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+-      return assign_eip(ctxt, dst, ctxt->mode);
++      return assign_eip(ctxt, dst);
+ }
+ 
+-static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+-                        const struct desc_struct *cs_desc)
++static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+-      enum x86emul_mode mode = ctxt->mode;
+-      int rc;
++      int rc = emulator_recalc_and_set_mode(ctxt);
+ 
+-#ifdef CONFIG_X86_64
+-      if (ctxt->mode >= X86EMUL_MODE_PROT16) {
+-              if (cs_desc->l) {
+-                      u64 efer = 0;
++      if (rc != X86EMUL_CONTINUE)
++              return rc;
+ 
+-                      ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-                      if (efer & EFER_LMA)
+-                              mode = X86EMUL_MODE_PROT64;
+-              } else
+-                      mode = X86EMUL_MODE_PROT32; /* temporary value */
+-      }
+-#endif
+-      if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+-              mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+-      rc = assign_eip(ctxt, dst, mode);
+-      if (rc == X86EMUL_CONTINUE)
+-              ctxt->mode = mode;
+-      return rc;
++      return assign_eip(ctxt, dst);
+ }
+ 
+ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+@@ -2237,7 +2266,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
++      rc = assign_eip_far(ctxt, ctxt->src.val);
+       /* Error handling is not implemented. */
+       if (rc != X86EMUL_CONTINUE)
+               return X86EMUL_UNHANDLEABLE;
+@@ -2318,7 +2347,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+                                      &new_desc);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+-      rc = assign_eip_far(ctxt, eip, &new_desc);
++      rc = assign_eip_far(ctxt, eip);
+       /* Error handling is not implemented. */
+       if (rc != X86EMUL_CONTINUE)
+               return X86EMUL_UNHANDLEABLE;
+@@ -2953,6 +2982,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+       ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+ 
+       ctxt->_eip = rdx;
++      ctxt->mode = usermode;
+       *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+ 
+       return X86EMUL_CONTINUE;
+@@ -3549,7 +3579,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
++      rc = assign_eip_far(ctxt, ctxt->src.val);
+       if (rc != X86EMUL_CONTINUE)
+               goto fail;
+ 
+@@ -3696,11 +3726,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_cr_write(struct x86_emulate_ctxt *ctxt)
+ {
+-      if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
++      int cr_num = ctxt->modrm_reg;
++      int r;
++
++      if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
+               return emulate_gp(ctxt, 0);
+ 
+       /* Disable writeback. */
+       ctxt->dst.type = OP_NONE;
++
++      if (cr_num == 0) {
++              /*
++               * CR0 write might have updated CR0.PE and/or CR0.PG
++               * which can affect the cpu's execution mode.
++               */
++              r = emulator_recalc_and_set_mode(ctxt);
++              if (r != X86EMUL_CONTINUE)
++                      return r;
++      }
++
+       return X86EMUL_CONTINUE;
+ }
+ 
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 962701d3f46bd..c73c8b0f5e400 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -420,6 +420,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
+  */
+ void bfq_schedule_dispatch(struct bfq_data *bfqd)
+ {
++      lockdep_assert_held(&bfqd->lock);
++
+       if (bfqd->queued != 0) {
+               bfq_log(bfqd, "schedule dispatch");
+               blk_mq_run_hw_queues(bfqd->queue, true);
+@@ -6257,8 +6259,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+       bfq_bfqq_expire(bfqd, bfqq, true, reason);
+ 
+ schedule_dispatch:
+-      spin_unlock_irqrestore(&bfqd->lock, flags);
+       bfq_schedule_dispatch(bfqd);
++      spin_unlock_irqrestore(&bfqd->lock, flags);
+ }
+ 
+ /*
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index b5022a7f6bae1..7e48ed7c9c8e8 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+               mm = alloc->vma_vm_mm;
+ 
+       if (mm) {
+-              down_read(&mm->mmap_sem);
++              down_write(&mm->mmap_sem);
+               vma = alloc->vma;
+       }
+ 
+@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+               /* vm_insert_page does not seem to increment the refcount */
+       }
+       if (mm) {
+-              up_read(&mm->mmap_sem);
++              up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return 0;
+@@ -304,7 +304,7 @@ err_page_ptr_cleared:
+       }
+ err_no_vma:
+       if (mm) {
+-              up_read(&mm->mmap_sem);
++              up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return vma ? -ENOMEM : -ESRCH;
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index d91ba47f2fc44..4405d255e3aa2 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -278,9 +278,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+       outb(inb(0x1F4) & 0x07, 0x1F4);
+ 
+       rt = inb(0x1F3);
+-      rt &= 0x07 << (3 * adev->devno);
++      rt &= ~(0x07 << (3 * !adev->devno));
+       if (pio)
+-              rt |= (1 + 3 * pio) << (3 * adev->devno);
++              rt |= (1 + 3 * pio) << (3 * !adev->devno);
++      outb(rt, 0x1F3);
+ 
+       udelay(100);
+       outb(inb(0x1F2) | 0x01, 0x1F2);
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 8fd74a7501d40..ac9fb336c80ff 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -546,7 +546,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
+ 
+               seed = early_memremap(efi.rng_seed, sizeof(*seed));
+               if (seed != NULL) {
+-                      size = READ_ONCE(seed->size);
++                      size = min(seed->size, EFI_RANDOM_SEED_SIZE);
+                       early_memunmap(seed, sizeof(*seed));
+               } else {
+                       pr_err("Could not map UEFI random seed!\n");
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index adeb1c8409768..4d09b247474da 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2693,13 +2693,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+       if (!intel_sdvo_connector)
+               return false;
+ 
+-      if (device == 0) {
+-              intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
++      if (device == 0)
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+-      } else if (device == 1) {
+-              intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
++      else if (device == 1)
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+-      }
+ 
+       intel_connector = &intel_sdvo_connector->base;
+       connector = &intel_connector->base;
+@@ -2753,7 +2750,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+       encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ 
+-      intel_sdvo->controlled_output |= type;
+       intel_sdvo_connector->output_flag = type;
+ 
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+@@ -2794,13 +2790,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+       encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+       connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ 
+-      if (device == 0) {
+-              intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
++      if (device == 0)
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+-      } else if (device == 1) {
+-              intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
++      else if (device == 1)
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+-      }
+ 
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+               kfree(intel_sdvo_connector);
+@@ -2830,13 +2823,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+       encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+       connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ 
+-      if (device == 0) {
+-              intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
++      if (device == 0)
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+-      } else if (device == 1) {
+-              intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
++      else if (device == 1)
+               intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+-      }
+ 
+       if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+               kfree(intel_sdvo_connector);
+@@ -2869,16 +2859,39 @@ err:
+       return false;
+ }
+ 
++static u16 intel_sdvo_filter_output_flags(u16 flags)
++{
++      flags &= SDVO_OUTPUT_MASK;
++
++      /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
++      if (!(flags & SDVO_OUTPUT_TMDS0))
++              flags &= ~SDVO_OUTPUT_TMDS1;
++
++      if (!(flags & SDVO_OUTPUT_RGB0))
++              flags &= ~SDVO_OUTPUT_RGB1;
++
++      if (!(flags & SDVO_OUTPUT_LVDS0))
++              flags &= ~SDVO_OUTPUT_LVDS1;
++
++      return flags;
++}
++
+ static bool
+ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+ {
+-      /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
++      struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
++
++      flags = intel_sdvo_filter_output_flags(flags);
++
++      intel_sdvo->controlled_output = flags;
++
++      intel_sdvo_select_ddc_bus(i915, intel_sdvo);
+ 
+       if (flags & SDVO_OUTPUT_TMDS0)
+               if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+                       return false;
+ 
+-      if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
++      if (flags & SDVO_OUTPUT_TMDS1)
+               if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+                       return false;
+ 
+@@ -2899,7 +2912,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+               if (!intel_sdvo_analog_init(intel_sdvo, 0))
+                       return false;
+ 
+-      if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
++      if (flags & SDVO_OUTPUT_RGB1)
+               if (!intel_sdvo_analog_init(intel_sdvo, 1))
+                       return false;
+ 
+@@ -2907,14 +2920,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+               if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+                       return false;
+ 
+-      if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
++      if (flags & SDVO_OUTPUT_LVDS1)
+               if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+                       return false;
+ 
+-      if ((flags & SDVO_OUTPUT_MASK) == 0) {
++      if (flags == 0) {
+               unsigned char bytes[2];
+ 
+-              intel_sdvo->controlled_output = 0;
+               memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+               DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+                             SDVO_NAME(intel_sdvo),
+@@ -3321,8 +3333,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+        */
+       intel_sdvo->base.cloneable = 0;
+ 
+-      intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
+-
+       /* Set the input timing to the screen. Assume always input 0. */
+       if (!intel_sdvo_set_target_input(intel_sdvo))
+               goto err_output;
+diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+index f7191ae2266fd..8e2f8410b0ddc 100644
+--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
++++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+@@ -1123,5 +1123,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = {
+               .of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+               .pm     = &dw_mipi_dsi_rockchip_pm_ops,
+               .name   = "dw-mipi-dsi-rockchip",
++              /*
++               * For dual-DSI display, one DSI pokes at the other DSI's
++               * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
++               * safe for asynchronous probe.
++               */
++              .probe_type = PROBE_FORCE_SYNCHRONOUS,
+       },
+ };
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c587a77d493c1..d6cd94cad571d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -818,6 +818,7 @@
+ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+ #define USB_DEVICE_ID_MADCATZ_RAT5    0x1705
+ #define USB_DEVICE_ID_MADCATZ_RAT9    0x1709
++#define USB_DEVICE_ID_MADCATZ_MMO7  0x1713
+ 
+ #define USB_VENDOR_ID_MCC             0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS   0x0076
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 45eba224cdc77..89e236b71ddfa 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -615,6 +615,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
++      { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) },
+ #endif
+ #if IS_ENABLED(CONFIG_HID_SAMSUNG)
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
+index c7bf14c019605..b84e975977c42 100644
+--- a/drivers/hid/hid-saitek.c
++++ b/drivers/hid/hid-saitek.c
+@@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = {
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
+               .driver_data = SAITEK_RELEASE_MODE_MMO7 },
++      { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7),
++              .driver_data = SAITEK_RELEASE_MODE_MMO7 },
+       { }
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index a48bee59dcdec..c92ea6990ec69 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -884,6 +884,7 @@ static struct platform_driver xiic_i2c_driver = {
+ 
+ module_platform_driver(xiic_i2c_driver);
+ 
++MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_AUTHOR("[email protected]");
+ MODULE_DESCRIPTION("Xilinx I2C bus driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index cf174aa7fe25b..052d156291539 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1434,7 +1434,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
+               return false;
+ 
+       memset(&fl4, 0, sizeof(fl4));
+-      fl4.flowi4_iif = net_dev->ifindex;
++      fl4.flowi4_oif = net_dev->ifindex;
+       fl4.daddr = daddr;
+       fl4.saddr = saddr;
+ 
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index de66d7da1bf6e..372ca5347d3ce 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2762,10 +2762,18 @@ static int __init ib_core_init(void)
+ 
+       nldev_init();
+       rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
+-      roce_gid_mgmt_init();
++      ret = roce_gid_mgmt_init();
++      if (ret) {
++              pr_warn("Couldn't init RoCE GID management\n");
++              goto err_parent;
++      }
+ 
+       return 0;
+ 
++err_parent:
++      rdma_nl_unregister(RDMA_NL_LS);
++      nldev_exit();
++      unregister_pernet_device(&rdma_dev_net_ops);
+ err_compat:
+       unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+ err_sa:
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index e4905d9fecb05..81b70f1f1290b 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2098,7 +2098,7 @@ void __init nldev_init(void)
+       rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
+ }
+ 
+-void __exit nldev_exit(void)
++void nldev_exit(void)
+ {
+       rdma_nl_unregister(RDMA_NL_NLDEV);
+ }
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index 1a82ea73a0fc2..fa5de362010f2 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -955,8 +955,7 @@ void sc_disable(struct send_context *sc)
+       spin_unlock(&sc->release_lock);
+ 
+       write_seqlock(&sc->waitlock);
+-      if (!list_empty(&sc->piowait))
+-              list_move(&sc->piowait, &wake_list);
++      list_splice_init(&sc->piowait, &wake_list);
+       write_sequnlock(&sc->waitlock);
+       while (!list_empty(&wake_list)) {
+               struct iowait *wait;
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index 93040c994e2e3..50b75bd4633c3 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -362,6 +362,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
+       if (IS_IWARP(dev)) {
+               xa_init(&dev->qps);
+               dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
++              if (!dev->iwarp_wq) {
++                      rc = -ENOMEM;
++                      goto err1;
++              }
+       }
+ 
+       /* Allocate Status blocks for CNQ */
+@@ -369,7 +373,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
+                               GFP_KERNEL);
+       if (!dev->sb_array) {
+               rc = -ENOMEM;
+-              goto err1;
++              goto err_destroy_wq;
+       }
+ 
+       dev->cnq_array = kcalloc(dev->num_cnq,
+@@ -423,6 +427,9 @@ err3:
+       kfree(dev->cnq_array);
+ err2:
+       kfree(dev->sb_array);
++err_destroy_wq:
++      if (IS_IWARP(dev))
++              destroy_workqueue(dev->iwarp_wq);
+ err1:
+       kfree(dev->sgid_tbl);
+       return rc;
+diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
+index 8299defff55ae..6d818d5d1377c 100644
+--- a/drivers/isdn/hardware/mISDN/netjet.c
++++ b/drivers/isdn/hardware/mISDN/netjet.c
+@@ -956,7 +956,7 @@ nj_release(struct tiger_hw *card)
+       }
+       if (card->irq > 0)
+               free_irq(card->irq, card);
+-      if (card->isac.dch.dev.dev.class)
++      if (device_is_registered(&card->isac.dch.dev.dev))
+               mISDN_unregister_device(&card->isac.dch.dev);
+ 
+       for (i = 0; i < 2; i++) {
+diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
+index a41b4b2645941..7ea0100f218a0 100644
+--- a/drivers/isdn/mISDN/core.c
++++ b/drivers/isdn/mISDN/core.c
+@@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev,
+       if (debug & DEBUG_CORE)
+               printk(KERN_DEBUG "mISDN_register %s %d\n",
+                      dev_name(&dev->dev), dev->id);
++      dev->dev.class = &mISDN_class;
++
+       err = create_stack(dev);
+       if (err)
+               goto error1;
+ 
+-      dev->dev.class = &mISDN_class;
+       dev->dev.platform_data = dev;
+       dev->dev.parent = parent;
+       dev_set_drvdata(&dev->dev, dev);
+@@ -249,8 +250,8 @@ mISDN_register_device(struct mISDNdevice *dev,
+ 
+ error3:
+       delete_stack(dev);
+-      return err;
+ error1:
++      put_device(&dev->dev);
+       return err;
+ 
+ }
+diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
+index 0a4875b391d9c..2dccc9d0be121 100644
+--- a/drivers/media/dvb-frontends/drxk_hard.c
++++ b/drivers/media/dvb-frontends/drxk_hard.c
+@@ -6684,7 +6684,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
+ static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+ {
+       struct drxk_state *state = fe->demodulator_priv;
+-      u16 err;
++      u16 err = 0;
+ 
+       dprintk(1, "\n");
+ 
+diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+index 31390ce2dbf2d..ae274a7aa3a94 100644
+--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
++++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+@@ -45,6 +45,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
+       uint8_t *cec_message = cros_ec->event_data.data.cec_message;
+       unsigned int len = cros_ec->event_size;
+ 
++      if (len > CEC_MAX_MSG_SIZE)
++              len = CEC_MAX_MSG_SIZE;
+       cros_ec_cec->rx_msg.len = len;
+       memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+ 
+diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
+index 828792b854f5c..0c668d4a3daa6 100644
+--- a/drivers/media/platform/s5p-cec/s5p_cec.c
++++ b/drivers/media/platform/s5p-cec/s5p_cec.c
+@@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
+                               dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
+                       cec->rx = STATE_BUSY;
+                       cec->msg.len = status >> 24;
++                      if (cec->msg.len > CEC_MAX_MSG_SIZE)
++                              cec->msg.len = CEC_MAX_MSG_SIZE;
+                       cec->msg.rx_status = CEC_RX_STATUS_OK;
+                       s5p_cec_get_rx_buf(cec, cec->msg.len,
+                                       cec->msg.msg);
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+index 02218c3b548f9..b806a762d079f 100644
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+@@ -652,8 +652,9 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+       unsigned int tRP_ps;
+       bool use_half_period;
+       int sample_delay_ps, sample_delay_factor;
+-      u16 busy_timeout_cycles;
++      unsigned int busy_timeout_cycles;
+       u8 wrn_dly_sel;
++      u64 busy_timeout_ps;
+ 
+       if (sdr->tRC_min >= 30000) {
+               /* ONFI non-EDO modes [0-3] */
+@@ -677,7 +678,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
+       addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
+       data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
+       data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
+-      busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
++      busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
++      busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
+ 
+       hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
+                     BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
+diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
+index 0df6c2b9484a4..e99e38c6738e4 100644
+--- a/drivers/net/dsa/dsa_loop.c
++++ b/drivers/net/dsa/dsa_loop.c
+@@ -329,6 +329,17 @@ static struct mdio_driver dsa_loop_drv = {
+ 
+ #define NUM_FIXED_PHYS        (DSA_LOOP_NUM_PORTS - 2)
+ 
++static void dsa_loop_phydevs_unregister(void)
++{
++      unsigned int i;
++
++      for (i = 0; i < NUM_FIXED_PHYS; i++)
++              if (!IS_ERR(phydevs[i])) {
++                      fixed_phy_unregister(phydevs[i]);
++                      phy_device_free(phydevs[i]);
++              }
++}
++
+ static int __init dsa_loop_init(void)
+ {
+       struct fixed_phy_status status = {
+@@ -336,23 +347,23 @@ static int __init dsa_loop_init(void)
+               .speed = SPEED_100,
+               .duplex = DUPLEX_FULL,
+       };
+-      unsigned int i;
++      unsigned int i, ret;
+ 
+       for (i = 0; i < NUM_FIXED_PHYS; i++)
+               phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+ 
+-      return mdio_driver_register(&dsa_loop_drv);
++      ret = mdio_driver_register(&dsa_loop_drv);
++      if (ret)
++              dsa_loop_phydevs_unregister();
++
++      return ret;
+ }
+ module_init(dsa_loop_init);
+ 
+ static void __exit dsa_loop_exit(void)
+ {
+-      unsigned int i;
+-
+       mdio_driver_unregister(&dsa_loop_drv);
+-      for (i = 0; i < NUM_FIXED_PHYS; i++)
+-              if (!IS_ERR(phydevs[i]))
+-                      fixed_phy_unregister(phydevs[i]);
++      dsa_loop_phydevs_unregister();
+ }
+ module_exit(dsa_loop_exit);
+ 
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a31f891d51fbc..e1b8c58c4d6b2 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -626,7 +626,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+               dev_kfree_skb_any(skb);
+               if (net_ratelimit())
+                       netdev_err(ndev, "Tx DMA memory map failed\n");
+-              return NETDEV_TX_BUSY;
++              return NETDEV_TX_OK;
+       }
+ 
+       bdp->cbd_datlen = cpu_to_fec16(size);
+@@ -688,7 +688,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+                       dev_kfree_skb_any(skb);
+                       if (net_ratelimit())
+                               netdev_err(ndev, "Tx DMA memory map failed\n");
+-                      return NETDEV_TX_BUSY;
++                      return NETDEV_TX_OK;
+               }
+       }
+ 
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 05c24db507a2c..757763735e1f5 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -419,7 +419,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+               bus->reset(bus);
+ 
+       for (i = 0; i < PHY_MAX_ADDR; i++) {
+-              if ((bus->phy_mask & (1 << i)) == 0) {
++              if ((bus->phy_mask & BIT(i)) == 0) {
+                       struct phy_device *phydev;
+ 
+                       phydev = mdiobus_scan(bus, i);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index dd02fcc972774..22a46a1382bae 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1492,7 +1492,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
+       int err;
+       int i;
+ 
+-      if (it->nr_segs > MAX_SKB_FRAGS + 1)
++      if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
++          len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
+               return ERR_PTR(-EMSGSIZE);
+ 
+       local_bh_disable();
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+index a30fcfbf2ee7c..94f8431588601 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+@@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
+                         brcmf_fweh_event_name(event->code), event->code,
+                         event->emsg.ifidx, event->emsg.bsscfgidx,
+                         event->emsg.addr);
++              if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
++                      bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
++                      goto event_free;
++              }
+ 
+               /* convert event message */
+               emsg_be = &event->emsg;
+diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
+index 919b4d2f5d8b5..fa6db971bee97 100644
+--- a/drivers/nfc/nfcmrvl/i2c.c
++++ b/drivers/nfc/nfcmrvl/i2c.c
+@@ -151,10 +151,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
+                       ret = -EREMOTEIO;
+               } else
+                       ret = 0;
++      }
++
++      if (ret) {
+               kfree_skb(skb);
++              return ret;
+       }
+ 
+-      return ret;
++      consume_skb(skb);
++      return 0;
+ }
+ 
+ static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
+diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
+index ba6c486d64659..9b43cd3a45afc 100644
+--- a/drivers/nfc/s3fwrn5/core.c
++++ b/drivers/nfc/s3fwrn5/core.c
+@@ -97,11 +97,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+       }
+ 
+       ret = s3fwrn5_write(info, skb);
+-      if (ret < 0)
++      if (ret < 0) {
+               kfree_skb(skb);
++              mutex_unlock(&info->mutex);
++              return ret;
++      }
+ 
++      consume_skb(skb);
+       mutex_unlock(&info->mutex);
+-      return ret;
++      return 0;
+ }
+ 
+ static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
+index 32f506f00c897..7914cf3fd24ff 100644
+--- a/drivers/parisc/iosapic.c
++++ b/drivers/parisc/iosapic.c
+@@ -875,6 +875,7 @@ int iosapic_serial_irq(struct parisc_device *dev)
+ 
+       return vi->txn_irq;
+ }
++EXPORT_SYMBOL(iosapic_serial_irq);
+ #endif
+ 
+ 
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 6faf1d6451b0c..530b14685fd71 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -795,6 +795,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
+       }
+ 
+       mutex_lock(&sdev->state_mutex);
++      switch (sdev->sdev_state) {
++      case SDEV_RUNNING:
++      case SDEV_OFFLINE:
++              break;
++      default:
++              mutex_unlock(&sdev->state_mutex);
++              return -EINVAL;
++      }
+       if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
+               ret = 0;
+       } else {
+diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
+index 8dd1396909d7e..a242bbe23ba24 100644
+--- a/drivers/staging/media/meson/vdec/vdec.c
++++ b/drivers/staging/media/meson/vdec/vdec.c
+@@ -1074,6 +1074,7 @@ static int vdec_probe(struct platform_device *pdev)
+ 
+ err_vdev_release:
+       video_device_release(vdev);
++      v4l2_device_unregister(&core->v4l2_dev);
+       return ret;
+ }
+ 
+@@ -1082,6 +1083,7 @@ static int vdec_remove(struct platform_device *pdev)
+       struct amvdec_core *core = platform_get_drvdata(pdev);
+ 
+       video_unregister_device(core->vdev_dec);
++      v4l2_device_unregister(&core->v4l2_dev);
+ 
+       return 0;
+ }
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index 7ef60f8b6e2cf..577612ab2a2cb 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -108,7 +108,7 @@ config SERIAL_8250_CONSOLE
+ 
+ config SERIAL_8250_GSC
+       tristate
+-      depends on SERIAL_8250 && GSC
++      depends on SERIAL_8250 && PARISC
+       default SERIAL_8250
+ 
+ config SERIAL_8250_DMA
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 7147bb66a4821..3cbca2ebdeb0a 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -287,8 +287,10 @@ static void prelim_release(struct preftree *preftree)
+       struct prelim_ref *ref, *next_ref;
+ 
+       rbtree_postorder_for_each_entry_safe(ref, next_ref,
+-                                           &preftree->root.rb_root, rbnode)
++                                           &preftree->root.rb_root, rbnode) {
++              free_inode_elem_list(ref->inode_list);
+               free_pref(ref);
++      }
+ 
+       preftree->root = RB_ROOT_CACHED;
+       preftree->count = 0;
+@@ -642,6 +644,18 @@ unode_aux_to_inode_list(struct ulist_node *node)
+       return (struct extent_inode_elem *)(uintptr_t)node->aux;
+ }
+ 
++static void free_leaf_list(struct ulist *ulist)
++{
++      struct ulist_node *node;
++      struct ulist_iterator uiter;
++
++      ULIST_ITER_INIT(&uiter);
++      while ((node = ulist_next(ulist, &uiter)))
++              free_inode_elem_list(unode_aux_to_inode_list(node));
++
++      ulist_free(ulist);
++}
++
+ /*
+  * We maintain three separate rbtrees: one for direct refs, one for
+  * indirect refs which have a key, and one for indirect refs which do not
+@@ -756,7 +770,11 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+               cond_resched();
+       }
+ out:
+-      ulist_free(parents);
++      /*
++       * We may have inode lists attached to refs in the parents ulist, so we
++       * must free them before freeing the ulist and its refs.
++       */
++      free_leaf_list(parents);
+       return ret;
+ }
+ 
+@@ -1367,6 +1385,12 @@ again:
+                               if (ret < 0)
+                                       goto out;
+                               ref->inode_list = eie;
++                              /*
++                               * We transferred the list ownership to the ref,
++                               * so set to NULL to avoid a double free in case
++                               * an error happens after this.
++                               */
++                              eie = NULL;
+                       }
+                       ret = ulist_add_merge_ptr(refs, ref->parent,
+                                                 ref->inode_list,
+@@ -1392,6 +1416,14 @@ again:
+                               eie->next = ref->inode_list;
+                       }
+                       eie = NULL;
++                      /*
++                       * We have transferred the inode list ownership from
++                       * this ref to the ref we added to the 'refs' ulist.
++                       * So set this ref's inode list to NULL to avoid
++                       * use-after-free when our caller uses it or double
++                       * frees in case an error happens before we return.
++                       */
++                      ref->inode_list = NULL;
+               }
+               cond_resched();
+       }
+@@ -1408,24 +1440,6 @@ out:
+       return ret;
+ }
+ 
+-static void free_leaf_list(struct ulist *blocks)
+-{
+-      struct ulist_node *node = NULL;
+-      struct extent_inode_elem *eie;
+-      struct ulist_iterator uiter;
+-
+-      ULIST_ITER_INIT(&uiter);
+-      while ((node = ulist_next(blocks, &uiter))) {
+-              if (!node->aux)
+-                      continue;
+-              eie = unode_aux_to_inode_list(node);
+-              free_inode_elem_list(eie);
+-              node->aux = 0;
+-      }
+-
+-      ulist_free(blocks);
+-}
+-
+ /*
+  * Finds all leafs with a reference to the specified combination of bytenr and
+  * offset. key_list_head will point to a list of corresponding keys (caller must
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 93cceeba484cc..6e4727304b7b4 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -58,7 +58,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ }
+ 
+ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+-                              u64 root_objectid, u32 generation,
++                              u64 root_objectid, u64 generation,
+                               int check_generation)
+ {
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index f32f4113c976a..5afb7ca428289 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -19,7 +19,7 @@ struct btrfs_fid {
+ } __attribute__ ((packed));
+ 
+ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+-                              u64 root_objectid, u32 generation,
++                              u64 root_objectid, u64 generation,
+                               int check_generation);
+ struct dentry *btrfs_get_parent(struct dentry *child);
+ 
+diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
+index ac035a6fa003d..f312ed5abb19d 100644
+--- a/fs/btrfs/tests/qgroup-tests.c
++++ b/fs/btrfs/tests/qgroup-tests.c
+@@ -237,8 +237,10 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
+ 
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
+-      if (ret)
++      if (ret) {
++              ulist_free(old_roots);
+               return ret;
++      }
+ 
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+                       false);
+@@ -273,8 +275,10 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
+       }
+ 
+       ret = remove_extent_item(root, nodesize, nodesize);
+-      if (ret)
++      if (ret) {
++              ulist_free(old_roots);
+               return -EINVAL;
++      }
+ 
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+                       false);
+@@ -338,8 +342,10 @@ static int test_multiple_refs(struct btrfs_root *root,
+ 
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
+-      if (ret)
++      if (ret) {
++              ulist_free(old_roots);
+               return ret;
++      }
+ 
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+                       false);
+@@ -373,8 +379,10 @@ static int test_multiple_refs(struct btrfs_root *root,
+ 
+       ret = add_tree_ref(root, nodesize, nodesize, 0,
+                       BTRFS_FIRST_FREE_OBJECTID);
+-      if (ret)
++      if (ret) {
++              ulist_free(old_roots);
+               return ret;
++      }
+ 
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+                       false);
+@@ -414,8 +422,10 @@ static int test_multiple_refs(struct btrfs_root *root,
+ 
+       ret = remove_extent_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FIRST_FREE_OBJECTID);
+-      if (ret)
++      if (ret) {
++              ulist_free(old_roots);
+               return ret;
++      }
+ 
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+                       false);
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index 1faa8e4ffb9df..dbba3c3a2f064 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -443,7 +443,8 @@ int ext4_ext_migrate(struct inode *inode)
+        * already is extent-based, error out.
+        */
+       if (!ext4_has_feature_extents(inode->i_sb) ||
+-          (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
++          ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
++          ext4_has_inline_data(inode))
+               return -EINVAL;
+ 
+       if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index aaf1ed8ba87c1..c0f4703e64d54 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2141,8 +2141,16 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+       memcpy(data2, de, len);
+       de = (struct ext4_dir_entry_2 *) data2;
+       top = data2 + len;
+-      while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
++      while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) {
++              if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len,
++                                       (data2 + (blocksize - csum_size) -
++                                        (char *) de))) {
++                      brelse(bh2);
++                      brelse(bh);
++                      return -EFSCORRUPTED;
++              }
+               de = de2;
++      }
+       de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
+                                          (char *) de, blocksize);
+ 
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index efb2a48712919..8c799250ff396 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3239,6 +3239,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+                       goto out;
+       }
+ 
++      err = file_modified(file);
++      if (err)
++              goto out;
++
+       if (!(mode & FALLOC_FL_KEEP_SIZE))
+               set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ 
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 3671a51fe5ebc..1f4bdcda3fdad 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -346,6 +346,7 @@ int nfs40_init_client(struct nfs_client *clp)
+       ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
+                                       "NFSv4.0 transport Slot table");
+       if (ret) {
++              nfs4_shutdown_slot_table(tbl);
+               kfree(tbl);
+               return ret;
+       }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 1d2b81a233bbb..c60b3a1f6d2bc 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1743,6 +1743,7 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+ 
+ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
+ {
++      set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+       /* Mark all delegations for reclaim */
+       nfs_delegation_mark_reclaim(clp);
+       nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
+@@ -2588,6 +2589,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+                       if (status < 0)
+                               goto out_error;
+                       nfs4_state_end_reclaim_reboot(clp);
++                      continue;
+               }
+ 
+               /* Detect expired delegations... */
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index e69332d8f1cb5..3d5e09f7e3a7c 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -250,14 +250,6 @@ xfs_attr3_leaf_verify(
+       if (fa)
+               return fa;
+ 
+-      /*
+-       * In recovery there is a transient state where count == 0 is valid
+-       * because we may have transitioned an empty shortform attr to a leaf
+-       * if the attr didn't fit in shortform.
+-       */
+-      if (!xfs_log_in_recovery(mp) && ichdr.count == 0)
+-              return __this_address;
+-
+       /*
+        * firstused is the block offset of the first name info structure.
+        * Make sure it doesn't go off the block or crash into the header.
+diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
+index 22557527cfdb6..8cc3faa624049 100644
+--- a/fs/xfs/libxfs/xfs_defer.c
++++ b/fs/xfs/libxfs/xfs_defer.c
+@@ -234,10 +234,13 @@ xfs_defer_trans_roll(
+       struct xfs_log_item             *lip;
+       struct xfs_buf                  *bplist[XFS_DEFER_OPS_NR_BUFS];
+       struct xfs_inode                *iplist[XFS_DEFER_OPS_NR_INODES];
++      unsigned int                    ordered = 0; /* bitmap */
+       int                             bpcount = 0, ipcount = 0;
+       int                             i;
+       int                             error;
+ 
++      BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS);
++
+       list_for_each_entry(lip, &tp->t_items, li_trans) {
+               switch (lip->li_type) {
+               case XFS_LI_BUF:
+@@ -248,7 +251,10 @@ xfs_defer_trans_roll(
+                                       ASSERT(0);
+                                       return -EFSCORRUPTED;
+                               }
+-                              xfs_trans_dirty_buf(tp, bli->bli_buf);
++                              if (bli->bli_flags & XFS_BLI_ORDERED)
++                                      ordered |= (1U << bpcount);
++                              else
++                                      xfs_trans_dirty_buf(tp, bli->bli_buf);
+                               bplist[bpcount++] = bli->bli_buf;
+                       }
+                       break;
+@@ -289,6 +295,8 @@ xfs_defer_trans_roll(
+       /* Rejoin the buffers and dirty them so the log moves forward. */
+       for (i = 0; i < bpcount; i++) {
+               xfs_trans_bjoin(tp, bplist[i]);
++              if (ordered & (1U << i))
++                      xfs_trans_ordered_buf(tp, bplist[i]);
+               xfs_trans_bhold(tp, bplist[i]);
+       }
+ 
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 9596b86e7de90..6231b155e7f34 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -205,16 +205,18 @@ xfs_qm_adjust_dqtimers(
+  */
+ STATIC void
+ xfs_qm_init_dquot_blk(
+-      xfs_trans_t     *tp,
+-      xfs_mount_t     *mp,
+-      xfs_dqid_t      id,
+-      uint            type,
+-      xfs_buf_t       *bp)
++      struct xfs_trans        *tp,
++      struct xfs_mount        *mp,
++      xfs_dqid_t              id,
++      uint                    type,
++      struct xfs_buf          *bp)
+ {
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+-      xfs_dqblk_t     *d;
+-      xfs_dqid_t      curid;
+-      int             i;
++      struct xfs_dqblk        *d;
++      xfs_dqid_t              curid;
++      unsigned int            qflag;
++      unsigned int            blftype;
++      int                     i;
+ 
+       ASSERT(tp);
+       ASSERT(xfs_buf_islocked(bp));
+@@ -238,11 +240,39 @@ xfs_qm_init_dquot_blk(
+               }
+       }
+ 
+-      xfs_trans_dquot_buf(tp, bp,
+-                          (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
+-                          ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
+-                           XFS_BLF_GDQUOT_BUF)));
+-      xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
++      if (type & XFS_DQ_USER) {
++              qflag = XFS_UQUOTA_CHKD;
++              blftype = XFS_BLF_UDQUOT_BUF;
++      } else if (type & XFS_DQ_PROJ) {
++              qflag = XFS_PQUOTA_CHKD;
++              blftype = XFS_BLF_PDQUOT_BUF;
++      } else {
++              qflag = XFS_GQUOTA_CHKD;
++              blftype = XFS_BLF_GDQUOT_BUF;
++      }
++
++      xfs_trans_dquot_buf(tp, bp, blftype);
++
++      /*
++       * quotacheck uses delayed writes to update all the dquots on disk in an
++       * efficient manner instead of logging the individual dquot changes as
++       * they are made. However if we log the buffer allocated here and crash
++       * after quotacheck while the logged initialisation is still in the
++       * active region of the log, log recovery can replay the dquot buffer
++       * initialisation over the top of the checked dquots and corrupt quota
++       * accounting.
++       *
++       * To avoid this problem, quotacheck cannot log the initialised buffer.
++       * We must still dirty the buffer and write it back before the
++       * allocation transaction clears the log. Therefore, mark the buffer as
++       * ordered instead of logging it directly. This is safe for quotacheck
++       * because it detects and repairs allocated but initialized dquot blocks
++       * in the quota inodes.
++       */
++      if (!(mp->m_qflags & qflag))
++              xfs_trans_ordered_buf(tp, bp);
++      else
++              xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
+ }
+ 
+ /*
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index f8b5a37134f8f..e5a90a0b8f8a2 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2592,8 +2592,10 @@ xfs_ifree_cluster(
+                                       mp->m_bsize * igeo->blocks_per_cluster,
+                                       XBF_UNMAPPED);
+ 
+-              if (!bp)
++              if (!bp) {
++                      xfs_perag_put(pag);
+                       return -ENOMEM;
++              }
+ 
+               /*
+                * This buffer may not have been correctly initialised as we
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index b6f85e488d5c1..70880422057d6 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -789,7 +789,7 @@ xfs_iomap_write_unwritten(
+               xfs_trans_ijoin(tp, ip, 0);
+ 
+               error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
+-                              XFS_QMOPT_RES_REGBLKS);
++                              XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
+               if (error)
+                       goto error_on_bmapi_transaction;
+ 
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index b32a66452d441..2ba9f071c5e97 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -532,57 +532,9 @@ xfs_trans_apply_sb_deltas(
+                                 sizeof(sbp->sb_frextents) - 1);
+ }
+ 
+-STATIC int
+-xfs_sb_mod8(
+-      uint8_t                 *field,
+-      int8_t                  delta)
+-{
+-      int8_t                  counter = *field;
+-
+-      counter += delta;
+-      if (counter < 0) {
+-              ASSERT(0);
+-              return -EINVAL;
+-      }
+-      *field = counter;
+-      return 0;
+-}
+-
+-STATIC int
+-xfs_sb_mod32(
+-      uint32_t                *field,
+-      int32_t                 delta)
+-{
+-      int32_t                 counter = *field;
+-
+-      counter += delta;
+-      if (counter < 0) {
+-              ASSERT(0);
+-              return -EINVAL;
+-      }
+-      *field = counter;
+-      return 0;
+-}
+-
+-STATIC int
+-xfs_sb_mod64(
+-      uint64_t                *field,
+-      int64_t                 delta)
+-{
+-      int64_t                 counter = *field;
+-
+-      counter += delta;
+-      if (counter < 0) {
+-              ASSERT(0);
+-              return -EINVAL;
+-      }
+-      *field = counter;
+-      return 0;
+-}
+-
+ /*
+- * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
+- * and apply superblock counter changes to the in-core superblock.  The
++ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
++ * apply superblock counter changes to the in-core superblock.  The
+  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
+  * applied to the in-core superblock.  The idea is that that has already been
+  * done.
+@@ -627,20 +579,17 @@ xfs_trans_unreserve_and_mod_sb(
+       /* apply the per-cpu counters */
+       if (blkdelta) {
+               error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
+-              if (error)
+-                      goto out;
++              ASSERT(!error);
+       }
+ 
+       if (idelta) {
+               error = xfs_mod_icount(mp, idelta);
+-              if (error)
+-                      goto out_undo_fdblocks;
++              ASSERT(!error);
+       }
+ 
+       if (ifreedelta) {
+               error = xfs_mod_ifree(mp, ifreedelta);
+-              if (error)
+-                      goto out_undo_icount;
++              ASSERT(!error);
+       }
+ 
+       if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
+@@ -648,95 +597,23 @@ xfs_trans_unreserve_and_mod_sb(
+ 
+       /* apply remaining deltas */
+       spin_lock(&mp->m_sb_lock);
+-      if (rtxdelta) {
+-              error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
+-              if (error)
+-                      goto out_undo_ifree;
+-      }
+-
+-      if (tp->t_dblocks_delta != 0) {
+-              error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
+-              if (error)
+-                      goto out_undo_frextents;
+-      }
+-      if (tp->t_agcount_delta != 0) {
+-              error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
+-              if (error)
+-                      goto out_undo_dblocks;
+-      }
+-      if (tp->t_imaxpct_delta != 0) {
+-              error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
+-              if (error)
+-                      goto out_undo_agcount;
+-      }
+-      if (tp->t_rextsize_delta != 0) {
+-              error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
+-                                   tp->t_rextsize_delta);
+-              if (error)
+-                      goto out_undo_imaxpct;
+-      }
+-      if (tp->t_rbmblocks_delta != 0) {
+-              error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
+-                                   tp->t_rbmblocks_delta);
+-              if (error)
+-                      goto out_undo_rextsize;
+-      }
+-      if (tp->t_rblocks_delta != 0) {
+-              error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
+-              if (error)
+-                      goto out_undo_rbmblocks;
+-      }
+-      if (tp->t_rextents_delta != 0) {
+-              error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
+-                                   tp->t_rextents_delta);
+-              if (error)
+-                      goto out_undo_rblocks;
+-      }
+-      if (tp->t_rextslog_delta != 0) {
+-              error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
+-                                   tp->t_rextslog_delta);
+-              if (error)
+-                      goto out_undo_rextents;
+-      }
++      mp->m_sb.sb_frextents += rtxdelta;
++      mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
++      mp->m_sb.sb_agcount += tp->t_agcount_delta;
++      mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
++      mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
++      mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
++      mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
++      mp->m_sb.sb_rextents += tp->t_rextents_delta;
++      mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
+       spin_unlock(&mp->m_sb_lock);
+-      return;
+ 
+-out_undo_rextents:
+-      if (tp->t_rextents_delta)
+-              xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
+-out_undo_rblocks:
+-      if (tp->t_rblocks_delta)
+-              xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
+-out_undo_rbmblocks:
+-      if (tp->t_rbmblocks_delta)
+-              xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
+-out_undo_rextsize:
+-      if (tp->t_rextsize_delta)
+-              xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
+-out_undo_imaxpct:
+-      if (tp->t_rextsize_delta)
+-              xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
+-out_undo_agcount:
+-      if (tp->t_agcount_delta)
+-              xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
+-out_undo_dblocks:
+-      if (tp->t_dblocks_delta)
+-              xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
+-out_undo_frextents:
+-      if (rtxdelta)
+-              xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
+-out_undo_ifree:
+-      spin_unlock(&mp->m_sb_lock);
+-      if (ifreedelta)
+-              xfs_mod_ifree(mp, -ifreedelta);
+-out_undo_icount:
+-      if (idelta)
+-              xfs_mod_icount(mp, -idelta);
+-out_undo_fdblocks:
+-      if (blkdelta)
+-              xfs_mod_fdblocks(mp, -blkdelta, rsvd);
+-out:
+-      ASSERT(error == 0);
++      /*
++       * Debug checks outside of the spinlock so they don't lock up the
++       * machine if they fail.
++       */
++      ASSERT(mp->m_sb.sb_imax_pct >= 0);
++      ASSERT(mp->m_sb.sb_rextslog >= 0);
+       return;
+ }
+ 
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index 2a85c393cb710..c1238a2dbd6a5 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -756,7 +756,8 @@ xfs_trans_reserve_quota_bydquots(
+       }
+ 
+       if (gdqp) {
+-              error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
++              error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos,
++                                      (flags & ~XFS_QMOPT_ENOSPC));
+               if (error)
+                       goto unwind_usr;
+       }
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index f9b9f9a2fd4a5..880cd86c829df 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1715,7 +1715,7 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+                                   void *priv,
+                                   efi_exit_boot_map_processing priv_func);
+ 
+-#define EFI_RANDOM_SEED_SIZE          64U
++#define EFI_RANDOM_SEED_SIZE          32U // BLAKE2S_HASH_SIZE
+ 
+ struct linux_efi_random_seed {
+       u32     size;
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 2b778e1d2d8f1..0fd2df844fc71 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -35,8 +35,6 @@
+ 
+ /* This is used to register protocols. */
+ struct net_protocol {
+-      int                     (*early_demux)(struct sk_buff *skb);
+-      int                     (*early_demux_handler)(struct sk_buff *skb);
+       int                     (*handler)(struct sk_buff *skb);
+ 
+       /* This returns an error if we weren't able to handle the error. */
+@@ -53,8 +51,6 @@ struct net_protocol {
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol {
+-      void    (*early_demux)(struct sk_buff *skb);
+-      void    (*early_demux_handler)(struct sk_buff *skb);
+       int     (*handler)(struct sk_buff *skb);
+ 
+       /* This returns an error if we weren't able to handle the error. */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 5b2473a08241a..077feeca6c99e 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -922,6 +922,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
+ #endif
+       return 0;
+ }
++
++void tcp_v6_early_demux(struct sk_buff *skb);
+ #endif
+ 
+ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
+diff --git a/include/net/udp.h b/include/net/udp.h
+index e66854e767dcc..bbd607fb939a6 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -169,6 +169,7 @@ typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+                               struct udphdr *uh, udp_lookup_t lookup);
+ int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
++void udp_v6_early_demux(struct sk_buff *skb);
+ 
+ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+                                 netdev_features_t features);
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 767587ab45a39..46a870e31e250 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -137,7 +137,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+       key_t key = params->key;
+       int msgflg = params->flg;
+ 
+-      msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
++      msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
+       if (unlikely(!msq))
+               return -ENOMEM;
+ 
+diff --git a/ipc/sem.c b/ipc/sem.c
+index fe12ea8dd2b3d..bd907ed2ce00a 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -492,7 +492,7 @@ static struct sem_array *sem_alloc(size_t nsems)
+       if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
+               return NULL;
+ 
+-      sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
++      sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
+       if (unlikely(!sma))
+               return NULL;
+ 
+@@ -1835,7 +1835,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
+ 
+       undo_list = current->sysvsem.undo_list;
+       if (!undo_list) {
+-              undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
++              undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
+               if (undo_list == NULL)
+                       return -ENOMEM;
+               spin_lock_init(&undo_list->lock);
+@@ -1920,7 +1920,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
+       rcu_read_unlock();
+ 
+       /* step 2: allocate new undo structure */
+-      new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
++      new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
+       if (!new) {
+               ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
+               return ERR_PTR(-ENOMEM);
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 984addb5aeb5e..0145767da1c12 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+                       ns->shm_tot + numpages > ns->shm_ctlall)
+               return -ENOSPC;
+ 
+-      shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
++      shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
+       if (unlikely(!shp))
+               return -ENOMEM;
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 6e9f5a10e04ae..f8ea8cf694c6c 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2199,8 +2199,11 @@ int enable_kprobe(struct kprobe *kp)
+       if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+               p->flags &= ~KPROBE_FLAG_DISABLED;
+               ret = arm_kprobe(p);
+-              if (ret)
++              if (ret) {
+                       p->flags |= KPROBE_FLAG_DISABLED;
++                      if (p != kp)
++                              kp->flags |= KPROBE_FLAG_DISABLED;
++              }
+       }
+ out:
+       mutex_unlock(&kprobe_mutex);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 2d28b4e49b7a3..c0cdc28e1d1ed 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3560,7 +3560,8 @@ done:
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                          sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 
+-                      if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
++                      if (remote_efs &&
++                          test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+                               chan->remote_id = efs.id;
+                               chan->remote_stype = efs.stype;
+                               chan->remote_msdu = le16_to_cpu(efs.msdu);
+@@ -6273,6 +6274,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+                              struct l2cap_ctrl *control,
+                              struct sk_buff *skb, u8 event)
+ {
++      struct l2cap_ctrl local_control;
+       int err = 0;
+       bool skb_in_use = false;
+ 
+@@ -6297,15 +6299,32 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+                       chan->buffer_seq = chan->expected_tx_seq;
+                       skb_in_use = true;
+ 
++                      /* l2cap_reassemble_sdu may free skb, hence invalidate
++                       * control, so make a copy in advance to use it after
++                       * l2cap_reassemble_sdu returns and to avoid the race
++                       * condition, for example:
++                       *
++                       * The current thread calls:
++                       *   l2cap_reassemble_sdu
++                       *     chan->ops->recv == l2cap_sock_recv_cb
++                       *       __sock_queue_rcv_skb
++                       * Another thread calls:
++                       *   bt_sock_recvmsg
++                       *     skb_recv_datagram
++                       *     skb_free_datagram
++                       * Then the current thread tries to access control, but
++                       * it was freed by skb_free_datagram.
++                       */
++                      local_control = *control;
+                       err = l2cap_reassemble_sdu(chan, skb, control);
+                       if (err)
+                               break;
+ 
+-                      if (control->final) {
++                      if (local_control.final) {
+                               if (!test_and_clear_bit(CONN_REJ_ACT,
+                                                       &chan->conn_state)) {
+-                                      control->final = 0;
+-                                      l2cap_retransmit_all(chan, control);
++                                      local_control.final = 0;
++                                      l2cap_retransmit_all(chan, &local_control);
+                                       l2cap_ertm_send(chan);
+                               }
+                       }
+@@ -6685,11 +6704,27 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+                          struct sk_buff *skb)
+ {
++      /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
++       * the txseq field in advance to use it after l2cap_reassemble_sdu
++       * returns and to avoid the race condition, for example:
++       *
++       * The current thread calls:
++       *   l2cap_reassemble_sdu
++       *     chan->ops->recv == l2cap_sock_recv_cb
++       *       __sock_queue_rcv_skb
++       * Another thread calls:
++       *   bt_sock_recvmsg
++       *     skb_recv_datagram
++       *     skb_free_datagram
++       * Then the current thread tries to access control, but it was freed by
++       * skb_free_datagram.
++       */
++      u16 txseq = control->txseq;
++
+       BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+              chan->rx_state);
+ 
+-      if (l2cap_classify_txseq(chan, control->txseq) ==
+-          L2CAP_TXSEQ_EXPECTED) {
++      if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
+               l2cap_pass_to_tx(chan, control);
+ 
+               BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
+@@ -6712,8 +6747,8 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+               }
+       }
+ 
+-      chan->last_acked_seq = control->txseq;
+-      chan->expected_tx_seq = __next_seq(chan, control->txseq);
++      chan->last_acked_seq = txseq;
++      chan->expected_tx_seq = __next_seq(chan, txseq);
+ 
+       return 0;
+ }
+@@ -6967,6 +7002,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+                               return;
+                       }
+ 
++                      l2cap_chan_hold(chan);
+                       l2cap_chan_lock(chan);
+               } else {
+                       BT_DBG("unknown cid 0x%4.4x", cid);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index aa81aead0a654..67820219e3b60 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -373,7 +373,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+       write_lock_bh(&tbl->lock);
+       neigh_flush_dev(tbl, dev, skip_perm);
+       pneigh_ifdown_and_unlock(tbl, dev);
+-      pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
++      pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
+       if (skb_queue_empty_lockless(&tbl->proxy_queue))
+               del_timer_sync(&tbl->proxy_timer);
+       return 0;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 058dbcb90541e..3c6412cb4b486 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1702,12 +1702,7 @@ static const struct net_protocol igmp_protocol = {
+ };
+ #endif
+ 
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct net_protocol tcp_protocol = {
+-      .early_demux    =       tcp_v4_early_demux,
+-      .early_demux_handler =  tcp_v4_early_demux,
++static const struct net_protocol tcp_protocol = {
+       .handler        =       tcp_v4_rcv,
+       .err_handler    =       tcp_v4_err,
+       .no_policy      =       1,
+@@ -1715,12 +1710,7 @@ static struct net_protocol tcp_protocol = {
+       .icmp_strict_tag_validation = 1,
+ };
+ 
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct net_protocol udp_protocol = {
+-      .early_demux =  udp_v4_early_demux,
+-      .early_demux_handler =  udp_v4_early_demux,
++static const struct net_protocol udp_protocol = {
+       .handler =      udp_rcv,
+       .err_handler =  udp_err,
+       .no_policy =    1,
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index c59a78a267c37..1464e27382112 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -302,31 +302,38 @@ drop:
+       return true;
+ }
+ 
+-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
+-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
++int udp_v4_early_demux(struct sk_buff *);
++int tcp_v4_early_demux(struct sk_buff *);
+ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+                             struct sk_buff *skb, struct net_device *dev)
+ {
+       const struct iphdr *iph = ip_hdr(skb);
+-      int (*edemux)(struct sk_buff *skb);
+       struct rtable *rt;
+       int err;
+ 
+-      if (net->ipv4.sysctl_ip_early_demux &&
++      if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
+           !skb_dst(skb) &&
+           !skb->sk &&
+           !ip_is_fragment(iph)) {
+-              const struct net_protocol *ipprot;
+-              int protocol = iph->protocol;
+-
+-              ipprot = rcu_dereference(inet_protos[protocol]);
+-              if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
+-                      err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
+-                                            udp_v4_early_demux, skb);
+-                      if (unlikely(err))
+-                              goto drop_error;
+-                      /* must reload iph, skb->head might have changed */
+-                      iph = ip_hdr(skb);
++              switch (iph->protocol) {
++              case IPPROTO_TCP:
++                      if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
++                              tcp_v4_early_demux(skb);
++
++                              /* must reload iph, skb->head might have changed */
++                              iph = ip_hdr(skb);
++                      }
++                      break;
++              case IPPROTO_UDP:
++                      if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
++                              err = udp_v4_early_demux(skb);
++                              if (unlikely(err))
++                                      goto drop_error;
++
++                              /* must reload iph, skb->head might have changed */
++                              iph = ip_hdr(skb);
++                      }
++                      break;
+               }
+       }
+ 
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index c83a5d05aeaaf..4d4dba1d42ae1 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -363,61 +363,6 @@ bad_key:
+       return ret;
+ }
+ 
+-static void proc_configure_early_demux(int enabled, int protocol)
+-{
+-      struct net_protocol *ipprot;
+-#if IS_ENABLED(CONFIG_IPV6)
+-      struct inet6_protocol *ip6prot;
+-#endif
+-
+-      rcu_read_lock();
+-
+-      ipprot = rcu_dereference(inet_protos[protocol]);
+-      if (ipprot)
+-              ipprot->early_demux = enabled ? ipprot->early_demux_handler :
+-                                              NULL;
+-
+-#if IS_ENABLED(CONFIG_IPV6)
+-      ip6prot = rcu_dereference(inet6_protos[protocol]);
+-      if (ip6prot)
+-              ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
+-                                               NULL;
+-#endif
+-      rcu_read_unlock();
+-}
+-
+-static int proc_tcp_early_demux(struct ctl_table *table, int write,
+-                              void __user *buffer, size_t *lenp, loff_t *ppos)
+-{
+-      int ret = 0;
+-
+-      ret = proc_dointvec(table, write, buffer, lenp, ppos);
+-
+-      if (write && !ret) {
+-              int enabled = init_net.ipv4.sysctl_tcp_early_demux;
+-
+-              proc_configure_early_demux(enabled, IPPROTO_TCP);
+-      }
+-
+-      return ret;
+-}
+-
+-static int proc_udp_early_demux(struct ctl_table *table, int write,
+-                              void __user *buffer, size_t *lenp, loff_t *ppos)
+-{
+-      int ret = 0;
+-
+-      ret = proc_dointvec(table, write, buffer, lenp, ppos);
+-
+-      if (write && !ret) {
+-              int enabled = init_net.ipv4.sysctl_udp_early_demux;
+-
+-              proc_configure_early_demux(enabled, IPPROTO_UDP);
+-      }
+-
+-      return ret;
+-}
+-
+ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
+                                            int write,
+                                            void __user *buffer,
+@@ -701,14 +646,14 @@ static struct ctl_table ipv4_net_table[] = {
+               .data           = &init_net.ipv4.sysctl_udp_early_demux,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = proc_udp_early_demux
++              .proc_handler   = proc_douintvec_minmax,
+       },
+       {
+               .procname       = "tcp_early_demux",
+               .data           = &init_net.ipv4.sysctl_tcp_early_demux,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = proc_tcp_early_demux
++              .proc_handler   = proc_douintvec_minmax,
+       },
+       {
+               .procname       = "ip_default_ttl",
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index e6c4966aa956c..ebf90bce063ad 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -44,21 +44,25 @@
+ #include <net/inet_ecn.h>
+ #include <net/dst_metadata.h>
+ 
+-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
+-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
++void udp_v6_early_demux(struct sk_buff *);
++void tcp_v6_early_demux(struct sk_buff *);
+ static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
+                               struct sk_buff *skb)
+ {
+-      void (*edemux)(struct sk_buff *skb);
+-
+-      if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+-              const struct inet6_protocol *ipprot;
+-
+-              ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
+-              if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
+-                      INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
+-                                      udp_v6_early_demux, skb);
++      if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
++          !skb_dst(skb) && !skb->sk) {
++              switch (ipv6_hdr(skb)->nexthdr) {
++              case IPPROTO_TCP:
++                      if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
++                              tcp_v6_early_demux(skb);
++                      break;
++              case IPPROTO_UDP:
++                      if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
++                              udp_v6_early_demux(skb);
++                      break;
++              }
+       }
++
+       if (!skb_valid_dst(skb))
+               ip6_route_input(skb);
+ }
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 5352c7e68c42c..1d7fad8269e60 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -164,6 +164,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+               rtnl_lock();
+       lock_sock(sk);
+ 
++      /* Another thread has converted the socket into IPv4 with
++       * IPV6_ADDRFORM concurrently.
++       */
++      if (unlikely(sk->sk_family != AF_INET6))
++              goto unlock;
++
+       switch (optname) {
+ 
+       case IPV6_ADDRFORM:
+@@ -924,6 +930,7 @@ pref_skip_coa:
+               break;
+       }
+ 
++unlock:
+       release_sock(sk);
+       if (needs_rtnl)
+               rtnl_unlock();
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 00732ee6bbd8a..badfe69396387 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -6359,10 +6359,16 @@ static void __net_exit ip6_route_net_exit(struct net *net)
+ static int __net_init ip6_route_net_init_late(struct net *net)
+ {
+ #ifdef CONFIG_PROC_FS
+-      proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
+-                      sizeof(struct ipv6_route_iter));
+-      proc_create_net_single("rt6_stats", 0444, net->proc_net,
+-                      rt6_stats_seq_show, NULL);
++      if (!proc_create_net("ipv6_route", 0, net->proc_net,
++                           &ipv6_route_seq_ops,
++                           sizeof(struct ipv6_route_iter)))
++              return -ENOMEM;
++
++      if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
++                                  rt6_stats_seq_show, NULL)) {
++              remove_proc_entry("ipv6_route", net->proc_net);
++              return -ENOMEM;
++      }
+ #endif
+       return 0;
+ }
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 397c4597c4389..831f779aba7b0 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1729,7 +1729,7 @@ do_time_wait:
+       goto discard_it;
+ }
+ 
+-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
++void tcp_v6_early_demux(struct sk_buff *skb)
+ {
+       const struct ipv6hdr *hdr;
+       const struct tcphdr *th;
+@@ -2084,12 +2084,7 @@ struct proto tcpv6_prot = {
+       .diag_destroy           = tcp_abort,
+ };
+ 
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct inet6_protocol tcpv6_protocol = {
+-      .early_demux    =       tcp_v6_early_demux,
+-      .early_demux_handler =  tcp_v6_early_demux,
++static const struct inet6_protocol tcpv6_protocol = {
+       .handler        =       tcp_v6_rcv,
+       .err_handler    =       tcp_v6_err,
+       .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 62c0db6df5630..fd1ce0405b7ea 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -973,7 +973,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
+       return NULL;
+ }
+ 
+-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
++void udp_v6_early_demux(struct sk_buff *skb)
+ {
+       struct net *net = dev_net(skb->dev);
+       const struct udphdr *uh;
+@@ -1603,12 +1603,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+ }
+ #endif
+ 
+-/* thinking of making this const? Don't.
+- * early_demux can change based on sysctl.
+- */
+-static struct inet6_protocol udpv6_protocol = {
+-      .early_demux    =       udp_v6_early_demux,
+-      .early_demux_handler =  udp_v6_early_demux,
++static const struct inet6_protocol udpv6_protocol = {
+       .handler        =       udpv6_rcv,
+       .err_handler    =       udpv6_err,
+       .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
+index f9b16f2b22191..fdacbc3c15bef 100644
+--- a/net/netfilter/ipvs/ip_vs_app.c
++++ b/net/netfilter/ipvs/ip_vs_app.c
+@@ -599,13 +599,19 @@ static const struct seq_operations ip_vs_app_seq_ops = {
+ int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
+ {
+       INIT_LIST_HEAD(&ipvs->app_list);
+-      proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_seq_ops,
+-                      sizeof(struct seq_net_private));
++#ifdef CONFIG_PROC_FS
++      if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net,
++                           &ip_vs_app_seq_ops,
++                           sizeof(struct seq_net_private)))
++              return -ENOMEM;
++#endif
+       return 0;
+ }
+ 
+ void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
+ {
+       unregister_ip_vs_app(ipvs, NULL /* all */);
++#ifdef CONFIG_PROC_FS
+       remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
++#endif
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index a189079a6ea50..d66548d2e5de4 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -1225,8 +1225,8 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
+        * The drop rate array needs tuning for real environments.
+        * Called from timer bh only => no locking
+        */
+-      static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+-      static char todrop_counter[9] = {0};
++      static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
++      static signed char todrop_counter[9] = {0};
+       int i;
+ 
+       /* if the conn entry hasn't lasted for 60 seconds, don't drop it.
+@@ -1373,20 +1373,36 @@ int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
+ {
+       atomic_set(&ipvs->conn_count, 0);
+ 
+-      proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
+-                      &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state));
+-      proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
+-                      &ip_vs_conn_sync_seq_ops,
+-                      sizeof(struct ip_vs_iter_state));
++#ifdef CONFIG_PROC_FS
++      if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net,
++                           &ip_vs_conn_seq_ops,
++                           sizeof(struct ip_vs_iter_state)))
++              goto err_conn;
++
++      if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net,
++                           &ip_vs_conn_sync_seq_ops,
++                           sizeof(struct ip_vs_iter_state)))
++              goto err_conn_sync;
++#endif
++
+       return 0;
++
++#ifdef CONFIG_PROC_FS
++err_conn_sync:
++      remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
++err_conn:
++      return -ENOMEM;
++#endif
+ }
+ 
+ void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
+ {
+       /* flush all the connection entries first */
+       ip_vs_conn_flush(ipvs);
++#ifdef CONFIG_PROC_FS
+       remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
+       remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
++#endif
+ }
+ 
+ int __init ip_vs_conn_init(void)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f9cecd30f1bad..140c24f1b6c60 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6576,9 +6576,6 @@ static void nft_commit_release(struct nft_trans *trans)
+               nf_tables_chain_destroy(&trans->ctx);
+               break;
+       case NFT_MSG_DELRULE:
+-              if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+-                      nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+-
+               nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+               break;
+       case NFT_MSG_DELSET:
+@@ -6913,6 +6910,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+                       nft_rule_expr_deactivate(&trans->ctx,
+                                                nft_trans_rule(trans),
+                                                NFT_TRANS_COMMIT);
++
++                      if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
++                              
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+                       break;
+               case NFT_MSG_NEWSET:
+                       nft_clear(net, nft_trans_set(trans));
+diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
+index f6102e6f51617..730d2205f1976 100644
+--- a/net/rose/rose_link.c
++++ b/net/rose/rose_link.c
+@@ -236,6 +236,9 @@ void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, uns
+       unsigned char *dptr;
+       int len;
+ 
++      if (!neigh->dev)
++              return;
++
+       len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3;
+ 
+       if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 7741f102be4a0..476853ff69894 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -59,6 +59,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ {
+       struct red_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *child = q->qdisc;
++      unsigned int len;
+       int ret;
+ 
+       q->vars.qavg = red_calc_qavg(&q->parms,
+@@ -94,9 +95,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               break;
+       }
+ 
++      len = qdisc_pkt_len(skb);
+       ret = qdisc_enqueue(skb, child, to_free);
+       if (likely(ret == NET_XMIT_SUCCESS)) {
+-              qdisc_qstats_backlog_inc(sch, skb);
++              sch->qstats.backlog += len;
+               sch->q.qlen++;
+       } else if (net_xmit_drop_count(ret)) {
+               q->stats.pdrop++;
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 1c70d11491863..d1890a6e64750 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -391,8 +391,10 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+                                &tmpbuf, size, GFP_NOFS);
+       dput(dentry);
+ 
+-      if (ret < 0 || !tmpbuf)
+-              return ret;
++      if (ret < 0 || !tmpbuf) {
++              size = ret;
++              goto out_free;
++      }
+ 
+       fs_ns = inode->i_sb->s_user_ns;
+       cap = (struct vfs_cap_data *) tmpbuf;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index c29ccdf9e8bca..5f44b142a8b0b 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3759,6 +3759,64 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
+       }
+ },
+ 
++/*
++ * MacroSilicon MS2100/MS2106 based AV capture cards
++ *
++ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
++ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
++ * they pretend to be 96kHz mono as a workaround for stereo being broken
++ * by that...
++ *
++ * They also have an issue with initial stream alignment that causes the
++ * channels to be swapped and out of phase, which is dealt with in quirks.c.
++ */
++{
++      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++                     USB_DEVICE_ID_MATCH_INT_CLASS |
++                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++      .idVendor = 0x534d,
++      .idProduct = 0x0021,
++      .bInterfaceClass = USB_CLASS_AUDIO,
++      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
++      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++              .vendor_name = "MacroSilicon",
++              .product_name = "MS210x",
++              .ifnum = QUIRK_ANY_INTERFACE,
++              .type = QUIRK_COMPOSITE,
++              .data = &(const struct snd_usb_audio_quirk[]) {
++                      {
++                              .ifnum = 2,
++                              .type = QUIRK_AUDIO_ALIGN_TRANSFER,
++                      },
++                      {
++                              .ifnum = 2,
++                              .type = QUIRK_AUDIO_STANDARD_MIXER,
++                      },
++                      {
++                              .ifnum = 3,
++                              .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++                              .data = &(const struct audioformat) {
++                                      .formats = SNDRV_PCM_FMTBIT_S16_LE,
++                                      .channels = 2,
++                                      .iface = 3,
++                                      .altsetting = 1,
++                                      .altset_idx = 1,
++                                      .attributes = 0,
++                                      .endpoint = 0x82,
++                                      .ep_attr = USB_ENDPOINT_XFER_ISOC |
++                                              USB_ENDPOINT_SYNC_ASYNC,
++                                      .rates = SNDRV_PCM_RATE_CONTINUOUS,
++                                      .rate_min = 48000,
++                                      .rate_max = 48000,
++                              }
++                      },
++                      {
++                              .ifnum = -1
++                      }
++              }
++      }
++},
++
+ /*
+  * MacroSilicon MS2109 based HDMI capture cards
+  *
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 72223545abfd8..d61f95dc1abf7 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1440,6 +1440,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+       case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
+               set_format_emu_quirk(subs, fmt);
+               break;
++      case USB_ID(0x534d, 0x0021): /* MacroSilicon MS2100/MS2106 */
+       case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+               subs->stream_offset_adj = 2;
+               break;
+diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
+index b8cecb66d28b7..c20d2fe7cebaa 100644
+--- a/tools/include/nolibc/nolibc.h
++++ b/tools/include/nolibc/nolibc.h
+@@ -2318,9 +2318,9 @@ static __attribute__((unused))
+ int memcmp(const void *s1, const void *s2, size_t n)
+ {
+       size_t ofs = 0;
+-      char c1 = 0;
++      int c1 = 0;
+ 
+-      while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) {
++      while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
+               ofs++;
+       }
+       return c1;
