commit:     b664604626feb273070cdeb2b09618299ca4b519
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep  3 11:21:04 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep  3 11:21:04 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b6646046

Linux patch 5.4.144

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1143_linux-5.4.144.patch | 1738 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1742 insertions(+)

diff --git a/0000_README b/0000_README
index ff37777..856bee4 100644
--- a/0000_README
+++ b/0000_README
@@ -615,6 +615,10 @@ Patch:  1142_linux-5.4.143.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.143
 
+Patch:  1143_linux-5.4.144.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.144
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1143_linux-5.4.144.patch b/1143_linux-5.4.144.patch
new file mode 100644
index 0000000..9bcf3a4
--- /dev/null
+++ b/1143_linux-5.4.144.patch
@@ -0,0 +1,1738 @@
+diff --git a/Makefile b/Makefile
+index e99fabc4dfc8c..3c3804197b511 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 143
++SUBLEVEL = 144
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
+index 6c693a9d29b6d..0391b8293ad85 100644
+--- a/arch/arc/kernel/vmlinux.lds.S
++++ b/arch/arc/kernel/vmlinux.lds.S
+@@ -88,6 +88,8 @@ SECTIONS
+               CPUIDLE_TEXT
+               LOCK_TEXT
+               KPROBES_TEXT
++              IRQENTRY_TEXT
++              SOFTIRQENTRY_TEXT
+               *(.fixup)
+               *(.gnu.warning)
+       }
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts 
b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
+index a5f9a6ab512c4..9b989cc30edc8 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
++++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
+@@ -30,3 +30,7 @@
+               };
+       };
+ };
++
++&msmgpio {
++      gpio-reserved-ranges = <85 4>;
++};
+diff --git a/arch/parisc/include/asm/string.h 
b/arch/parisc/include/asm/string.h
+index 4a0c9dbd62fd0..f6e1132f4e352 100644
+--- a/arch/parisc/include/asm/string.h
++++ b/arch/parisc/include/asm/string.h
+@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
+ #define __HAVE_ARCH_MEMCPY
+ void * memcpy(void * dest,const void *src,size_t count);
+ 
+-#define __HAVE_ARCH_STRLEN
+-extern size_t strlen(const char *s);
+-
+-#define __HAVE_ARCH_STRCPY
+-extern char *strcpy(char *dest, const char *src);
+-
+-#define __HAVE_ARCH_STRNCPY
+-extern char *strncpy(char *dest, const char *src, size_t count);
+-
+-#define __HAVE_ARCH_STRCAT
+-extern char *strcat(char *dest, const char *src);
+-
+-#define __HAVE_ARCH_MEMSET
+-extern void *memset(void *, int, size_t);
+-
+ #endif
+diff --git a/arch/parisc/kernel/parisc_ksyms.c 
b/arch/parisc/kernel/parisc_ksyms.c
+index 8ed409ecec933..e8a6a751dfd8e 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -17,10 +17,6 @@
+ 
+ #include <linux/string.h>
+ EXPORT_SYMBOL(memset);
+-EXPORT_SYMBOL(strlen);
+-EXPORT_SYMBOL(strcpy);
+-EXPORT_SYMBOL(strncpy);
+-EXPORT_SYMBOL(strcat);
+ 
+ #include <linux/atomic.h>
+ EXPORT_SYMBOL(__xchg8);
+diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
+index 2d7a9974dbaef..7b197667faf6c 100644
+--- a/arch/parisc/lib/Makefile
++++ b/arch/parisc/lib/Makefile
+@@ -3,7 +3,7 @@
+ # Makefile for parisc-specific library files
+ #
+ 
+-lib-y := lusercopy.o bitops.o checksum.o io.o memcpy.o \
+-         ucmpdi2.o delay.o string.o
++lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
++         ucmpdi2.o delay.o
+ 
+ obj-y := iomap.o
+diff --git a/arch/parisc/lib/memset.c b/arch/parisc/lib/memset.c
+new file mode 100644
+index 0000000000000..133e4809859a3
+--- /dev/null
++++ b/arch/parisc/lib/memset.c
+@@ -0,0 +1,72 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#include <linux/types.h>
++#include <asm/string.h>
++
++#define OPSIZ (BITS_PER_LONG/8)
++typedef unsigned long op_t;
++
++void *
++memset (void *dstpp, int sc, size_t len)
++{
++  unsigned int c = sc;
++  long int dstp = (long int) dstpp;
++
++  if (len >= 8)
++    {
++      size_t xlen;
++      op_t cccc;
++
++      cccc = (unsigned char) c;
++      cccc |= cccc << 8;
++      cccc |= cccc << 16;
++      if (OPSIZ > 4)
++      /* Do the shift in two steps to avoid warning if long has 32 bits.  */
++      cccc |= (cccc << 16) << 16;
++
++      /* There are at least some bytes to set.
++       No need to test for LEN == 0 in this alignment loop.  */
++      while (dstp % OPSIZ != 0)
++      {
++        ((unsigned char *) dstp)[0] = c;
++        dstp += 1;
++        len -= 1;
++      }
++
++      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain.  */
++      xlen = len / (OPSIZ * 8);
++      while (xlen > 0)
++      {
++        ((op_t *) dstp)[0] = cccc;
++        ((op_t *) dstp)[1] = cccc;
++        ((op_t *) dstp)[2] = cccc;
++        ((op_t *) dstp)[3] = cccc;
++        ((op_t *) dstp)[4] = cccc;
++        ((op_t *) dstp)[5] = cccc;
++        ((op_t *) dstp)[6] = cccc;
++        ((op_t *) dstp)[7] = cccc;
++        dstp += 8 * OPSIZ;
++        xlen -= 1;
++      }
++      len %= OPSIZ * 8;
++
++      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain.  */
++      xlen = len / OPSIZ;
++      while (xlen > 0)
++      {
++        ((op_t *) dstp)[0] = cccc;
++        dstp += OPSIZ;
++        xlen -= 1;
++      }
++      len %= OPSIZ;
++    }
++
++  /* Write the last few bytes.  */
++  while (len > 0)
++    {
++      ((unsigned char *) dstp)[0] = c;
++      dstp += 1;
++      len -= 1;
++    }
++
++  return dstpp;
++}
+diff --git a/arch/parisc/lib/string.S b/arch/parisc/lib/string.S
+deleted file mode 100644
+index 4a64264427a63..0000000000000
+--- a/arch/parisc/lib/string.S
++++ /dev/null
+@@ -1,136 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- *    PA-RISC assembly string functions
+- *
+- *    Copyright (C) 2019 Helge Deller <[email protected]>
+- */
+-
+-#include <asm/assembly.h>
+-#include <linux/linkage.h>
+-
+-      .section .text.hot
+-      .level PA_ASM_LEVEL
+-
+-      t0 = r20
+-      t1 = r21
+-      t2 = r22
+-
+-ENTRY_CFI(strlen, frame=0,no_calls)
+-      or,COND(<>) arg0,r0,ret0
+-      b,l,n   .Lstrlen_null_ptr,r0
+-      depwi   0,31,2,ret0
+-      cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
+-      ldw,ma  4(ret0),t0
+-      cmpib,tr 0,r0,.Lstrlen_loop
+-      uxor,nbz r0,t0,r0
+-.Lstrlen_not_aligned:
+-      uaddcm  arg0,ret0,t1
+-      shladd  t1,3,r0,t1
+-      mtsar   t1
+-      depwi   -1,%sar,32,t0
+-      uxor,nbz r0,t0,r0
+-.Lstrlen_loop:
+-      b,l,n   .Lstrlen_end_loop,r0
+-      ldw,ma  4(ret0),t0
+-      cmpib,tr 0,r0,.Lstrlen_loop
+-      uxor,nbz r0,t0,r0
+-.Lstrlen_end_loop:
+-      extrw,u,<> t0,7,8,r0
+-      addib,tr,n -3,ret0,.Lstrlen_out
+-      extrw,u,<> t0,15,8,r0
+-      addib,tr,n -2,ret0,.Lstrlen_out
+-      extrw,u,<> t0,23,8,r0
+-      addi    -1,ret0,ret0
+-.Lstrlen_out:
+-      bv r0(rp)
+-      uaddcm ret0,arg0,ret0
+-.Lstrlen_null_ptr:
+-      bv,n r0(rp)
+-ENDPROC_CFI(strlen)
+-
+-
+-ENTRY_CFI(strcpy, frame=0,no_calls)
+-      ldb     0(arg1),t0
+-      stb     t0,0(arg0)
+-      ldo     0(arg0),ret0
+-      ldo     1(arg1),t1
+-      cmpb,=  r0,t0,2f
+-      ldo     1(arg0),t2
+-1:    ldb     0(t1),arg1
+-      stb     arg1,0(t2)
+-      ldo     1(t1),t1
+-      cmpb,<> r0,arg1,1b
+-      ldo     1(t2),t2
+-2:    bv,n    r0(rp)
+-ENDPROC_CFI(strcpy)
+-
+-
+-ENTRY_CFI(strncpy, frame=0,no_calls)
+-      ldb     0(arg1),t0
+-      stb     t0,0(arg0)
+-      ldo     1(arg1),t1
+-      ldo     0(arg0),ret0
+-      cmpb,=  r0,t0,2f
+-      ldo     1(arg0),arg1
+-1:    ldo     -1(arg2),arg2
+-      cmpb,COND(=),n r0,arg2,2f
+-      ldb     0(t1),arg0
+-      stb     arg0,0(arg1)
+-      ldo     1(t1),t1
+-      cmpb,<> r0,arg0,1b
+-      ldo     1(arg1),arg1
+-2:    bv,n    r0(rp)
+-ENDPROC_CFI(strncpy)
+-
+-
+-ENTRY_CFI(strcat, frame=0,no_calls)
+-      ldb     0(arg0),t0
+-      cmpb,=  t0,r0,2f
+-      ldo     0(arg0),ret0
+-      ldo     1(arg0),arg0
+-1:    ldb     0(arg0),t1
+-      cmpb,<>,n r0,t1,1b
+-      ldo     1(arg0),arg0
+-2:    ldb     0(arg1),t2
+-      stb     t2,0(arg0)
+-      ldo     1(arg0),arg0
+-      ldb     0(arg1),t0
+-      cmpb,<> r0,t0,2b
+-      ldo     1(arg1),arg1
+-      bv,n    r0(rp)
+-ENDPROC_CFI(strcat)
+-
+-
+-ENTRY_CFI(memset, frame=0,no_calls)
+-      copy    arg0,ret0
+-      cmpb,COND(=) r0,arg0,4f
+-      copy    arg0,t2
+-      cmpb,COND(=) r0,arg2,4f
+-      ldo     -1(arg2),arg3
+-      subi    -1,arg3,t0
+-      subi    0,t0,t1
+-      cmpiclr,COND(>=) 0,t1,arg2
+-      ldo     -1(t1),arg2
+-      extru arg2,31,2,arg0
+-2:    stb     arg1,0(t2)
+-      ldo     1(t2),t2
+-      addib,>= -1,arg0,2b
+-      ldo     -1(arg3),arg3
+-      cmpiclr,COND(<=) 4,arg2,r0
+-      b,l,n   4f,r0
+-#ifdef CONFIG_64BIT
+-      depd,*  r0,63,2,arg2
+-#else
+-      depw    r0,31,2,arg2
+-#endif
+-      ldo     1(t2),t2
+-3:    stb     arg1,-1(t2)
+-      stb     arg1,0(t2)
+-      stb     arg1,1(t2)
+-      stb     arg1,2(t2)
+-      addib,COND(>) -4,arg2,3b
+-      ldo     4(t2),t2
+-4:    bv,n    r0(rp)
+-ENDPROC_CFI(memset)
+-
+-      .end
+diff --git a/arch/x86/events/intel/uncore_snbep.c 
b/arch/x86/events/intel/uncore_snbep.c
+index 40751af62dd3d..9096a1693942d 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -4382,7 +4382,7 @@ static void snr_uncore_mmio_init_box(struct 
intel_uncore_box *box)
+               return;
+ 
+       pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
+-      addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
++      addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
+ 
+       pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
+       addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 260c64c205b8c..d3877dd713aef 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4666,7 +4666,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu 
*vcpu,
+ void
+ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+ {
+-      bool uses_nx = context->nx ||
++      /*
++       * KVM uses NX when TDP is disabled to handle a variety of scenarios,
++       * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
++       * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
++       * The iTLB multi-hit workaround can be toggled at any time, so assume
++       * NX can be used by any non-nested shadow MMU to avoid having to reset
++       * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
++       */
++      bool uses_nx = context->nx || !tdp_enabled ||
+               context->mmu_role.base.smep_andnot_wp;
+       struct rsvd_bits_validate *shadow_zero_check;
+       int i;
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 40ea1a425c431..ac97a1e2e5ddc 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4063,22 +4063,21 @@ static int floppy_open(struct block_device *bdev, 
fmode_t mode)
+       if (UFDCS->rawcmd == 1)
+               UFDCS->rawcmd = 2;
+ 
+-      if (mode & (FMODE_READ|FMODE_WRITE)) {
+-              UDRS->last_checked = 0;
+-              clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+-              check_disk_change(bdev);
+-              if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+-                      goto out;
+-              if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
++      if (!(mode & FMODE_NDELAY)) {
++              if (mode & (FMODE_READ|FMODE_WRITE)) {
++                      UDRS->last_checked = 0;
++                      clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
++                      check_disk_change(bdev);
++                      if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
++                              goto out;
++                      if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
++                              goto out;
++              }
++              res = -EROFS;
++              if ((mode & FMODE_WRITE) &&
++                  !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+                       goto out;
+       }
+-
+-      res = -EROFS;
+-
+-      if ((mode & FMODE_WRITE) &&
+-                      !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+-              goto out;
+-
+       mutex_unlock(&open_lock);
+       mutex_unlock(&floppy_mutex);
+       return 0;
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index 2cf053fb8d54b..1c691bdb89141 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -863,8 +863,6 @@ static int compat_drm_wait_vblank(struct file *file, 
unsigned int cmd,
+       req.request.sequence = req32.request.sequence;
+       req.request.signal = req32.request.signal;
+       err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
+-      if (err)
+-              return err;
+ 
+       req32.reply.type = req.reply.type;
+       req32.reply.sequence = req.reply.sequence;
+@@ -873,7 +871,7 @@ static int compat_drm_wait_vblank(struct file *file, 
unsigned int cmd,
+       if (copy_to_user(argp, &req32, sizeof(req32)))
+               return -EFAULT;
+ 
+-      return 0;
++      return err;
+ }
+ 
+ #if defined(CONFIG_X86)
+diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c 
b/drivers/gpu/drm/i915/gt/intel_timeline.c
+index 9cb01d9828f1d..c970e3deb0085 100644
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -289,6 +289,14 @@ void intel_timeline_fini(struct intel_timeline *timeline)
+               i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+ 
+       i915_vma_put(timeline->hwsp_ggtt);
++
++      /*
++       * A small race exists between intel_gt_retire_requests_timeout and
++       * intel_timeline_exit which could result in the syncmap not getting
++       * free'd. Rather than work to hard to seal this race, simply cleanup
++       * the syncmap on fini.
++       */
++      i915_syncmap_free(&timeline->sync);
+ }
+ 
+ struct intel_timeline *
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+index 818d21bd28d31..1d2837c5a8f29 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+@@ -419,7 +419,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
+       return ret;
+ }
+ 
+-static void
++void
+ nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
+ {
+       struct nvkm_dp *dp = nvkm_dp(outp);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h 
b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+index 428b3f488f033..e484d0c3b0d42 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+@@ -32,6 +32,7 @@ struct nvkm_dp {
+ 
+ int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
+               struct nvkm_outp **);
++void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
+ 
+ /* DPCD Receiver Capabilities */
+ #define DPCD_RC00_DPCD_REV                                              
0x00000
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+index c62030c96fba0..4b1c72fd8f039 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+@@ -22,6 +22,7 @@
+  * Authors: Ben Skeggs
+  */
+ #include "outp.h"
++#include "dp.h"
+ #include "ior.h"
+ 
+ #include <subdev/bios.h>
+@@ -216,6 +217,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
+       if (!ior->arm.head || ior->arm.proto != proto) {
+               OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
+                        ior->arm.proto, proto);
++
++              /* The EFI GOP driver on Ampere can leave unused DP links 
routed,
++               * which we don't expect.  The DisableLT IED script *should* get
++               * us back to where we need to be.
++               */
++              if (ior->func->route.get && !ior->arm.head && outp->info.type 
== DCB_OUTPUT_DP)
++                      nvkm_dp_disable(outp, ior);
++
+               return;
+       }
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c 
b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 58c021648b7c8..a96f9142fe08e 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1404,6 +1404,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
+       if (nq)
+               nq->budget++;
+       atomic_inc(&rdev->srq_count);
++      spin_lock_init(&srq->lock);
+ 
+       return 0;
+ 
+diff --git a/drivers/infiniband/hw/efa/efa_main.c 
b/drivers/infiniband/hw/efa/efa_main.c
+index 83858f7e83d0f..75dfe9d1564c1 100644
+--- a/drivers/infiniband/hw/efa/efa_main.c
++++ b/drivers/infiniband/hw/efa/efa_main.c
+@@ -340,6 +340,7 @@ static int efa_enable_msix(struct efa_dev *dev)
+       }
+ 
+       if (irq_num != msix_vecs) {
++              efa_disable_msix(dev);
+               dev_err(&dev->pdev->dev,
+                       "Allocated %d MSI-X (out of %d requested)\n",
+                       irq_num, msix_vecs);
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c 
b/drivers/infiniband/hw/hfi1/sdma.c
+index c61b6022575e1..248be21acdbed 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3056,6 +3056,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
+ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq 
*tx)
+ {
+       int i;
++      struct sdma_desc *descp;
+ 
+       /* Handle last descriptor */
+       if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
+@@ -3076,12 +3077,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata 
*dd, struct sdma_txreq *tx)
+       if (unlikely(tx->num_desc == MAX_DESC))
+               goto enomem;
+ 
+-      tx->descp = kmalloc_array(
+-                      MAX_DESC,
+-                      sizeof(struct sdma_desc),
+-                      GFP_ATOMIC);
+-      if (!tx->descp)
++      descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
++      if (!descp)
+               goto enomem;
++      tx->descp = descp;
+ 
+       /* reserve last descriptor for coalescing */
+       tx->desc_limit = MAX_DESC - 1;
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 8bed81cf03adc..8ab963055238a 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1589,6 +1589,23 @@ out:
+       __sdhci_msm_set_clock(host, clock);
+ }
+ 
++static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command 
*cmd)
++{
++      u32 count, start = 15;
++
++      __sdhci_set_timeout(host, cmd);
++      count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
++      /*
++       * Update software timeout value if its value is less than hardware data
++       * timeout value. Qcom SoC hardware data timeout value was calculated
++       * using 4 * MCLK * 2^(count + 13). where MCLK = 1 / host->clock.
++       */
++      if (cmd && cmd->data && host->clock > 400000 &&
++          host->clock <= 50000000 &&
++          ((1 << (count + start)) > (10 * host->clock)))
++              host->data_timeout = 22LL * NSEC_PER_SEC;
++}
++
+ /*
+  * Platform specific register write functions. This is so that, if any
+  * register write needs to be followed up by platform specific actions,
+@@ -1753,6 +1770,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
+       .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+       .write_w = sdhci_msm_writew,
+       .write_b = sdhci_msm_writeb,
++      .set_timeout = sdhci_msm_set_timeout,
+ };
+ 
+ static const struct sdhci_pltfm_data sdhci_msm_pdata = {
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index 485e20e0dec2c..8847942a8d97e 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv 
*priv,
+       if (id == ESD_EV_CAN_ERROR_EXT) {
+               u8 state = msg->msg.rx.data[0];
+               u8 ecc = msg->msg.rx.data[1];
+-              u8 txerr = msg->msg.rx.data[2];
+-              u8 rxerr = msg->msg.rx.data[3];
++              u8 rxerr = msg->msg.rx.data[2];
++              u8 txerr = msg->msg.rx.data[3];
+ 
+               skb = alloc_can_err_skb(priv->netdev, &cf);
+               if (skb == NULL) {
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index e1a3c33fdad90..2d8382eb9add3 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -842,11 +842,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
+               /* Remove this port from the port matrix of the other ports
+                * in the same bridge. If the port is disabled, port matrix
+                * is kept and not being setup until the port becomes enabled.
+-               * And the other port's port matrix cannot be broken when the
+-               * other port is still a VLAN-aware port.
+                */
+-              if (dsa_is_user_port(ds, i) && i != port &&
+-                 !dsa_port_is_vlan_filtering(&ds->ports[i])) {
++              if (dsa_is_user_port(ds, i) && i != port) {
+                       if (dsa_to_port(ds, i)->bridge_dev != bridge)
+                               continue;
+                       if (priv->ports[i].enable)
+diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c 
b/drivers/net/ethernet/apm/xgene-v2/main.c
+index 02b4f3af02b54..848be6bf2fd1f 100644
+--- a/drivers/net/ethernet/apm/xgene-v2/main.c
++++ b/drivers/net/ethernet/apm/xgene-v2/main.c
+@@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
+       ret = register_netdev(ndev);
+       if (ret) {
+               netdev_err(ndev, "Failed to register netdev\n");
+-              goto err;
++              goto err_mdio_remove;
+       }
+ 
+       return 0;
+ 
++err_mdio_remove:
++      xge_mdio_remove(ndev);
+ err:
+       free_netdev(ndev);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+index e34e0854635c3..d64cded30eeb4 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+@@ -257,6 +257,9 @@ enum hclge_opcode_type {
+       /* Led command */
+       HCLGE_OPC_LED_STATUS_CFG        = 0xB000,
+ 
++      /* clear hardware resource command */
++      HCLGE_OPC_CLEAR_HW_RESOURCE     = 0x700B,
++
+       /* NCL config command */
+       HCLGE_OPC_QUERY_NCL_CONFIG      = 0x7011,
+       /* M7 stats command */
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+index a1790af73096d..d16488bab86f5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+@@ -281,21 +281,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, 
struct ieee_pfc *pfc)
+       u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
+       struct hclge_vport *vport = hclge_get_vport(h);
+       struct hclge_dev *hdev = vport->back;
+-      u8 i, j, pfc_map, *prio_tc;
+       int ret;
++      u8 i;
+ 
+       memset(pfc, 0, sizeof(*pfc));
+       pfc->pfc_cap = hdev->pfc_max;
+-      prio_tc = hdev->tm_info.prio_tc;
+-      pfc_map = hdev->tm_info.hw_pfc_map;
+-
+-      /* Pfc setting is based on TC */
+-      for (i = 0; i < hdev->tm_info.num_tc; i++) {
+-              for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
+-                      if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
+-                              pfc->pfc_en |= BIT(j);
+-              }
+-      }
++      pfc->pfc_en = hdev->tm_info.pfc_en;
+ 
+       ret = hclge_pfc_tx_stats_get(hdev, requests);
+       if (ret)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 93f3865b679bf..aa402e2671212 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -8006,7 +8006,11 @@ static int hclge_init_vlan_config(struct hclge_dev 
*hdev)
+ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+                                      bool writen_to_tbl)
+ {
+-      struct hclge_vport_vlan_cfg *vlan;
++      struct hclge_vport_vlan_cfg *vlan, *tmp;
++
++      list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
++              if (vlan->vlan_id == vlan_id)
++                      return;
+ 
+       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan)
+@@ -9165,6 +9169,28 @@ static void hclge_clear_resetting_state(struct 
hclge_dev *hdev)
+       }
+ }
+ 
++static int hclge_clear_hw_resource(struct hclge_dev *hdev)
++{
++      struct hclge_desc desc;
++      int ret;
++
++      hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
++
++      ret = hclge_cmd_send(&hdev->hw, &desc, 1);
++      /* This new command is only supported by new firmware, it will
++       * fail with older firmware. Error value -EOPNOSUPP can only be
++       * returned by older firmware running this command, to keep code
++       * backward compatible we will override this value and return
++       * success.
++       */
++      if (ret && ret != -EOPNOTSUPP) {
++              dev_err(&hdev->pdev->dev,
++                      "failed to clear hw resource, ret = %d\n", ret);
++              return ret;
++      }
++      return 0;
++}
++
+ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+ {
+       struct pci_dev *pdev = ae_dev->pdev;
+@@ -9206,6 +9232,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev 
*ae_dev)
+       if (ret)
+               goto err_cmd_uninit;
+ 
++      ret  = hclge_clear_hw_resource(hdev);
++      if (ret)
++              goto err_cmd_uninit;
++
+       ret = hclge_get_cap(hdev);
+       if (ret) {
+               dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c 
b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index a1fab77b2096a..58ff747a42ae6 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -995,6 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, 
bool link)
+ {
+       u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+           link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
++      u16 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
++      u16 lat_enc_d = 0;      /* latency decoded */
+       u16 lat_enc = 0;        /* latency encoded */
+ 
+       if (link) {
+@@ -1048,7 +1050,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw 
*hw, bool link)
+                                    E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
+               max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
+ 
+-              if (lat_enc > max_ltr_enc)
++              lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
++                           (1U << (E1000_LTRV_SCALE_FACTOR *
++                           ((lat_enc & E1000_LTRV_SCALE_MASK)
++                           >> E1000_LTRV_SCALE_SHIFT)));
++
++              max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
++                               (1U << (E1000_LTRV_SCALE_FACTOR *
++                               ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
++                               >> E1000_LTRV_SCALE_SHIFT)));
++
++              if (lat_enc_d > max_ltr_enc_d)
+                       lat_enc = max_ltr_enc;
+       }
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h 
b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index 1502895eb45dd..e757896287eba 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -274,8 +274,11 @@
+ 
+ /* Latency Tolerance Reporting */
+ #define E1000_LTRV                    0x000F8
++#define E1000_LTRV_VALUE_MASK         0x000003FF
+ #define E1000_LTRV_SCALE_MAX          5
+ #define E1000_LTRV_SCALE_FACTOR               5
++#define E1000_LTRV_SCALE_SHIFT                10
++#define E1000_LTRV_SCALE_MASK         0x00001C00
+ #define E1000_LTRV_REQ_SHIFT          15
+ #define E1000_LTRV_NOSNOOP_SHIFT      16
+ #define E1000_LTRV_SEND                       (1 << 30)
+diff --git a/drivers/net/ethernet/marvell/mvneta.c 
b/drivers/net/ethernet/marvell/mvneta.c
+index 7b0543056b101..64aa5510e61a6 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -101,7 +101,7 @@
+ #define      MVNETA_DESC_SWAP                    BIT(6)
+ #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
+ #define MVNETA_PORT_STATUS                       0x2444
+-#define      MVNETA_TX_IN_PRGRS                  BIT(1)
++#define      MVNETA_TX_IN_PRGRS                  BIT(0)
+ #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
+ #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+ /* Only exists on Armada XP and Armada 370 */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 19a1a58d60f89..c449ecc0add23 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -353,6 +353,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, 
void *p_cookie)
+       unsigned long flags;
+       int rc = -EINVAL;
+ 
++      if (!p_ll2_conn)
++              return rc;
++
+       spin_lock_irqsave(&p_tx->lock, flags);
+       if (p_tx->b_completing_packet) {
+               rc = -EBUSY;
+@@ -526,7 +529,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn 
*p_hwfn, void *cookie)
+       unsigned long flags = 0;
+       int rc = 0;
+ 
++      if (!p_ll2_conn)
++              return rc;
++
+       spin_lock_irqsave(&p_rx->lock, flags);
++
++      if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
++              spin_unlock_irqrestore(&p_rx->lock, flags);
++              return 0;
++      }
++
+       cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+       cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ 
+@@ -847,6 +859,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn 
*p_hwfn, void *p_cookie)
+       struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+       int rc;
+ 
++      if (!p_ll2_conn)
++              return 0;
++
+       if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+               return 0;
+ 
+@@ -870,6 +885,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn 
*p_hwfn, void *p_cookie)
+       u16 new_idx = 0, num_bds = 0;
+       int rc;
+ 
++      if (!p_ll2_conn)
++              return 0;
++
+       if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+               return 0;
+ 
+@@ -1642,6 +1660,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
+       if (!p_ll2_conn)
+               return -EINVAL;
+       p_rx = &p_ll2_conn->rx_queue;
++      if (!p_rx->set_prod_addr)
++              return -EIO;
+ 
+       spin_lock_irqsave(&p_rx->lock, flags);
+       if (!list_empty(&p_rx->free_descq))
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c 
b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index 38b1f402f7ed2..b291971bcf926 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -1245,8 +1245,7 @@ qed_rdma_create_qp(void *rdma_cxt,
+ 
+       if (!rdma_cxt || !in_params || !out_params ||
+           !p_hwfn->p_rdma_info->active) {
+-              DP_ERR(p_hwfn->cdev,
+-                     "qed roce create qp failed due to NULL entry 
(rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
++              pr_err("qed roce create qp failed due to NULL entry 
(rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+                      rdma_cxt, in_params, out_params);
+               return NULL;
+       }
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index 249738e1e0b7a..603c688fe23dc 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -682,8 +682,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct 
opp_table *opp_table)
+               }
+       }
+ 
+-      /* There should be one of more OPP defined */
+-      if (WARN_ON(!count)) {
++      /* There should be one or more OPPs defined */
++      if (!count) {
++              dev_err(dev, "%s: no supported OPPs", __func__);
+               ret = -ENOENT;
+               goto remove_static_opp;
+       }
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 11592ec7b23ea..6aeb79e744e0b 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -788,12 +788,15 @@ store_state_field(struct device *dev, struct 
device_attribute *attr,
+       ret = scsi_device_set_state(sdev, state);
+       /*
+        * If the device state changes to SDEV_RUNNING, we need to
+-       * rescan the device to revalidate it, and run the queue to
+-       * avoid I/O hang.
++       * run the queue to avoid I/O hang, and rescan the device
++       * to revalidate it. Running the queue first is necessary
++       * because another thread may be waiting inside
++       * blk_mq_freeze_queue_wait() and because that call may be
++       * waiting for pending I/O to finish.
+        */
+       if (ret == 0 && state == SDEV_RUNNING) {
+-              scsi_rescan_device(dev);
+               blk_mq_run_hw_queues(sdev->request_queue, true);
++              scsi_rescan_device(dev);
+       }
+       mutex_unlock(&sdev->state_mutex);
+ 
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index b5d2ad900ec09..167c72726c5a0 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -484,16 +484,19 @@ int vt_ioctl(struct tty_struct *tty,
+                       ret = -EINVAL;
+                       goto out;
+               }
+-              /* FIXME: this needs the console lock extending */
+-              if (vc->vc_mode == (unsigned char) arg)
++              console_lock();
++              if (vc->vc_mode == (unsigned char) arg) {
++                      console_unlock();
+                       break;
++              }
+               vc->vc_mode = (unsigned char) arg;
+-              if (console != fg_console)
++              if (console != fg_console) {
++                      console_unlock();
+                       break;
++              }
+               /*
+                * explicitly blank/unblank the screen if switching modes
+                */
+-              console_lock();
+               if (arg == KD_TEXT)
+                       do_unblank_screen(1);
+               else
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8a3752fcf7b46..39a9ad12cbbc8 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -894,19 +894,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep 
*dep, u8 index)
+ 
+ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+ {
+-      struct dwc3_trb         *tmp;
+       u8                      trbs_left;
+ 
+       /*
+-       * If enqueue & dequeue are equal than it is either full or empty.
+-       *
+-       * One way to know for sure is if the TRB right before us has HWO bit
+-       * set or not. If it has, then we're definitely full and can't fit any
+-       * more transfers in our ring.
++       * If the enqueue & dequeue are equal then the TRB ring is either full
++       * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
++       * pending to be processed by the driver.
+        */
+       if (dep->trb_enqueue == dep->trb_dequeue) {
+-              tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+-              if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
++              /*
++               * If there is any request remained in the started_list at
++               * this point, that means there is no TRB available.
++               */
++              if (!list_empty(&dep->started_list))
+                       return 0;
+ 
+               return DWC3_TRB_NUM - 1;
+@@ -2012,10 +2012,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int 
is_on)
+ 
+               ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+                               msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+-              if (ret == 0) {
+-                      dev_err(dwc->dev, "timed out waiting for SETUP 
phase\n");
+-                      return -ETIMEDOUT;
+-              }
++              if (ret == 0)
++                      dev_warn(dwc->dev, "timed out waiting for SETUP 
phase\n");
+       }
+ 
+       /*
+@@ -2217,6 +2215,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
+       /* begin to receive SETUP packets */
+       dwc->ep0state = EP0_SETUP_PHASE;
+       dwc->link_state = DWC3_LINK_STATE_SS_DIS;
++      dwc->delayed_status = false;
+       dwc3_ep0_out_start(dwc);
+ 
+       dwc3_gadget_enable_irq(dwc);
+diff --git a/drivers/usb/gadget/function/u_audio.c 
b/drivers/usb/gadget/function/u_audio.c
+index 223029fa84459..4e01ba0ab8ecb 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -349,8 +349,6 @@ static inline void free_ep(struct uac_rtd_params *prm, 
struct usb_ep *ep)
+       if (!prm->ep_enabled)
+               return;
+ 
+-      prm->ep_enabled = false;
+-
+       audio_dev = uac->audio_dev;
+       params = &audio_dev->params;
+ 
+@@ -368,11 +366,12 @@ static inline void free_ep(struct uac_rtd_params *prm, 
struct usb_ep *ep)
+               }
+       }
+ 
++      prm->ep_enabled = false;
++
+       if (usb_ep_disable(ep))
+               dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
+ }
+ 
+-
+ int u_audio_start_capture(struct g_audio *audio_dev)
+ {
+       struct snd_uac_chip *uac = audio_dev->uac;
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index e9c39a41faae9..a82ba9cc0c724 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -678,7 +678,6 @@ static struct usb_serial_driver ch341_device = {
+               .owner  = THIS_MODULE,
+               .name   = "ch341-uart",
+       },
+-      .bulk_in_size      = 512,
+       .id_table          = id_table,
+       .num_ports         = 1,
+       .open              = ch341_open,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 793530f241ceb..d42ca13569965 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = RSVD(4) | RSVD(5) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     
/* Fibocom NL678 series */
+         .driver_info = RSVD(6) },
++      { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    
/* Fibocom FG150 Diag */
++      { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          
/* Fibocom FG150 AT */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   
/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   
/* LongSung M5710 */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   
/* GosunCn GM500 RNDIS */
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index 026a37ee41777..4653de001e261 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -331,7 +331,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
+                       iov = wiov;
+               else {
+                       iov = riov;
+-                      if (unlikely(wiov && wiov->i)) {
++                      if (unlikely(wiov && wiov->used)) {
+                               vringh_bad("Readable desc %p after writable",
+                                          &descs[i]);
+                               err = -EINVAL;
+diff --git a/drivers/virtio/virtio_pci_common.c 
b/drivers/virtio/virtio_pci_common.c
+index 222d630c41fc9..b35bb2d57f62c 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
+       struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+       struct device *dev = get_device(&vp_dev->vdev.dev);
+ 
++      /*
++       * Device is marked broken on surprise removal so that virtio upper
++       * layers can abort any ongoing operation.
++       */
++      if (!pci_device_is_present(pci_dev))
++              virtio_break_device(&vp_dev->vdev);
++
+       pci_disable_sriov(pci_dev);
+ 
+       unregister_virtio_device(&vp_dev->vdev);
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index f6011c9ed32f1..e442d400dbb2e 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -2268,7 +2268,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
+ {
+       struct vring_virtqueue *vq = to_vvq(_vq);
+ 
+-      return vq->broken;
++      return READ_ONCE(vq->broken);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_is_broken);
+ 
+@@ -2283,7 +2283,9 @@ void virtio_break_device(struct virtio_device *dev)
+       spin_lock(&dev->vqs_list_lock);
+       list_for_each_entry(_vq, &dev->vqs, list) {
+               struct vring_virtqueue *vq = to_vvq(_vq);
+-              vq->broken = true;
++
++              /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
++              WRITE_ONCE(vq->broken, true);
+       }
+       spin_unlock(&dev->vqs_list_lock);
+ }
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index f853835c409c1..f3ff57b931580 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -268,6 +268,21 @@ static inline void btrfs_mod_outstanding_extents(struct 
btrfs_inode *inode,
+                                                 mod);
+ }
+ 
++/*
++ * Called every time after doing a buffered, direct IO or memory mapped write.
++ *
++ * This is to ensure that if we write to a file that was previously fsynced in
++ * the current transaction, then try to fsync it again in the same 
transaction,
++ * we will know that there were changes in the file and that it needs to be
++ * logged.
++ */
++static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
++{
++      spin_lock(&inode->lock);
++      inode->last_sub_trans = inode->root->log_transid;
++      spin_unlock(&inode->lock);
++}
++
+ static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 
generation)
+ {
+       int ret = 0;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 400b0717b9d44..1279359ed172a 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2004,14 +2004,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ 
+       inode_unlock(inode);
+ 
+-      /*
+-       * We also have to set last_sub_trans to the current log transid,
+-       * otherwise subsequent syncs to a file that's been synced in this
+-       * transaction will appear to have already occurred.
+-       */
+-      spin_lock(&BTRFS_I(inode)->lock);
+-      BTRFS_I(inode)->last_sub_trans = root->log_transid;
+-      spin_unlock(&BTRFS_I(inode)->lock);
++      btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
++
+       if (num_written > 0)
+               num_written = generic_write_sync(iocb, num_written);
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 54b607a3cc3f2..29552d4f6845b 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9250,9 +9250,7 @@ again:
+       set_page_dirty(page);
+       SetPageUptodate(page);
+ 
+-      BTRFS_I(inode)->last_trans = fs_info->generation;
+-      BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+-      BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
++      btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
+ 
+       unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
+ 
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index d8a7d460e436a..cbede328bda5b 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -160,7 +160,7 @@ static inline void btrfs_set_inode_last_trans(struct 
btrfs_trans_handle *trans,
+       spin_lock(&BTRFS_I(inode)->lock);
+       BTRFS_I(inode)->last_trans = trans->transaction->transid;
+       BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+-      BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
++      BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans - 1;
+       spin_unlock(&BTRFS_I(inode)->lock);
+ }
+ 
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 3e3529c600cb7..e882c790292f9 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -2168,7 +2168,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const 
char *device_path,
+ 
+       if (IS_ERR(device)) {
+               if (PTR_ERR(device) == -ENOENT &&
+-                  strcmp(device_path, "missing") == 0)
++                  device_path && strcmp(device_path, "missing") == 0)
+                       ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
+               else
+                       ret = PTR_ERR(device);
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index 11dd8177770df..19574ef174709 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -395,6 +395,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry 
*connected,
+        */
+       take_dentry_name_snapshot(&name, real);
+       this = lookup_one_len(name.name.name, connected, name.name.len);
++      release_dentry_name_snapshot(&name);
+       err = PTR_ERR(this);
+       if (IS_ERR(this)) {
+               goto fail;
+@@ -409,7 +410,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry 
*connected,
+       }
+ 
+ out:
+-      release_dentry_name_snapshot(&name);
+       dput(parent);
+       inode_unlock(dir);
+       return this;
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 90d2f62a96723..5a187e9b72212 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -549,8 +549,17 @@ static int proc_oom_score(struct seq_file *m, struct 
pid_namespace *ns,
+ {
+       unsigned long totalpages = totalram_pages() + total_swap_pages;
+       unsigned long points = 0;
++      long badness;
++
++      badness = oom_badness(task, totalpages);
++      /*
++       * Special case OOM_SCORE_ADJ_MIN for all others scale the
++       * badness value into [0, 2000] range which we have been
++       * exporting for a long time so userspace might depend on it.
++       */
++      if (badness != LONG_MIN)
++              points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3;
+ 
+-      points = oom_badness(task, totalpages) * 1000 / totalpages;
+       seq_printf(m, "%lu\n", points);
+ 
+       return 0;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 11a52f2fa35de..ddc66ab8a1def 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3684,6 +3684,10 @@ int netdev_rx_handler_register(struct net_device *dev,
+ void netdev_rx_handler_unregister(struct net_device *dev);
+ 
+ bool dev_valid_name(const char *name);
++static inline bool is_socket_ioctl_cmd(unsigned int cmd)
++{
++      return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
++}
+ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
+               bool *need_copyout);
+ int dev_ifconf(struct net *net, struct ifconf *, int);
+diff --git a/include/linux/once.h b/include/linux/once.h
+index 9225ee6d96c75..ae6f4eb41cbe7 100644
+--- a/include/linux/once.h
++++ b/include/linux/once.h
+@@ -7,7 +7,7 @@
+ 
+ bool __do_once_start(bool *done, unsigned long *flags);
+ void __do_once_done(bool *done, struct static_key_true *once_key,
+-                  unsigned long *flags);
++                  unsigned long *flags, struct module *mod);
+ 
+ /* Call a function exactly once. The idea of DO_ONCE() is to perform
+  * a function call such as initialization of random seeds, etc, only
+@@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true 
*once_key,
+                       if (unlikely(___ret)) {                              \
+                               func(__VA_ARGS__);                           \
+                               __do_once_done(&___done, &___once_key,       \
+-                                             &___flags);                   \
++                                             &___flags, THIS_MODULE);      \
+                       }                                                    \
+               }                                                            \
+               ___ret;                                                      \
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index b9df34326772c..2db9a14325112 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -48,7 +48,7 @@ struct oom_control {
+       /* Used by oom implementation, do not set */
+       unsigned long totalpages;
+       struct task_struct *chosen;
+-      unsigned long chosen_points;
++      long chosen_points;
+ 
+       /* Used to print the constraint info. */
+       enum oom_constraint constraint;
+@@ -108,7 +108,7 @@ static inline vm_fault_t check_stable_address_space(struct 
mm_struct *mm)
+ 
+ bool __oom_reap_task_mm(struct mm_struct *mm);
+ 
+-extern unsigned long oom_badness(struct task_struct *p,
++long oom_badness(struct task_struct *p,
+               unsigned long totalpages);
+ 
+ extern bool out_of_memory(struct oom_control *oc);
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index e49c912f862d0..9dec6314cd28a 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -595,7 +595,6 @@ static void prune_tree_chunks(struct audit_tree *victim, 
bool tagged)
+               spin_lock(&hash_lock);
+       }
+       spin_unlock(&hash_lock);
+-      put_tree(victim);
+ }
+ 
+ /*
+@@ -604,6 +603,7 @@ static void prune_tree_chunks(struct audit_tree *victim, 
bool tagged)
+ static void prune_one(struct audit_tree *victim)
+ {
+       prune_tree_chunks(victim, false);
++      put_tree(victim);
+ }
+ 
+ /* trim the uncommitted chunks from tree */
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 0b5a446ee59c9..4deaf15b7618b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2778,6 +2778,41 @@ static void coerce_reg_to_size(struct bpf_reg_state 
*reg, int size)
+       reg->smax_value = reg->umax_value;
+ }
+ 
++static bool bpf_map_is_rdonly(const struct bpf_map *map)
++{
++      return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
++}
++
++static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 
*val)
++{
++      void *ptr;
++      u64 addr;
++      int err;
++
++      err = map->ops->map_direct_value_addr(map, &addr, off);
++      if (err)
++              return err;
++      ptr = (void *)(long)addr + off;
++
++      switch (size) {
++      case sizeof(u8):
++              *val = (u64)*(u8 *)ptr;
++              break;
++      case sizeof(u16):
++              *val = (u64)*(u16 *)ptr;
++              break;
++      case sizeof(u32):
++              *val = (u64)*(u32 *)ptr;
++              break;
++      case sizeof(u64):
++              *val = *(u64 *)ptr;
++              break;
++      default:
++              return -EINVAL;
++      }
++      return 0;
++}
++
+ /* check whether memory at (regno + off) is accessible for t = (read | write)
+  * if t==write, value_regno is a register which value is stored into memory
+  * if t==read, value_regno is a register which will receive the value from 
memory
+@@ -2815,9 +2850,27 @@ static int check_mem_access(struct bpf_verifier_env 
*env, int insn_idx, u32 regn
+               if (err)
+                       return err;
+               err = check_map_access(env, regno, off, size, false);
+-              if (!err && t == BPF_READ && value_regno >= 0)
+-                      mark_reg_unknown(env, regs, value_regno);
++              if (!err && t == BPF_READ && value_regno >= 0) {
++                      struct bpf_map *map = reg->map_ptr;
++
++                      /* if map is read-only, track its contents as scalars */
++                      if (tnum_is_const(reg->var_off) &&
++                          bpf_map_is_rdonly(map) &&
++                          map->ops->map_direct_value_addr) {
++                              int map_off = off + reg->var_off.value;
++                              u64 val = 0;
+ 
++                              err = bpf_map_direct_read(map, map_off, size,
++                                                        &val);
++                              if (err)
++                                      return err;
++
++                              regs[value_regno].type = SCALAR_VALUE;
++                              __mark_reg_known(&regs[value_regno], val);
++                      } else {
++                              mark_reg_unknown(env, regs, value_regno);
++                      }
++              }
+       } else if (reg->type == PTR_TO_CTX) {
+               enum bpf_reg_type reg_type = SCALAR_VALUE;
+ 
+diff --git a/lib/once.c b/lib/once.c
+index 8b7d6235217ee..59149bf3bfb4a 100644
+--- a/lib/once.c
++++ b/lib/once.c
+@@ -3,10 +3,12 @@
+ #include <linux/spinlock.h>
+ #include <linux/once.h>
+ #include <linux/random.h>
++#include <linux/module.h>
+ 
+ struct once_work {
+       struct work_struct work;
+       struct static_key_true *key;
++      struct module *module;
+ };
+ 
+ static void once_deferred(struct work_struct *w)
+@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
+       work = container_of(w, struct once_work, work);
+       BUG_ON(!static_key_enabled(work->key));
+       static_branch_disable(work->key);
++      module_put(work->module);
+       kfree(work);
+ }
+ 
+-static void once_disable_jump(struct static_key_true *key)
++static void once_disable_jump(struct static_key_true *key, struct module *mod)
+ {
+       struct once_work *w;
+ 
+@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
+ 
+       INIT_WORK(&w->work, once_deferred);
+       w->key = key;
++      w->module = mod;
++      __module_get(mod);
+       schedule_work(&w->work);
+ }
+ 
+@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
+ EXPORT_SYMBOL(__do_once_start);
+ 
+ void __do_once_done(bool *done, struct static_key_true *once_key,
+-                  unsigned long *flags)
++                  unsigned long *flags, struct module *mod)
+       __releases(once_lock)
+ {
+       *done = true;
+       spin_unlock_irqrestore(&once_lock, *flags);
+-      once_disable_jump(once_key);
++      once_disable_jump(once_key, mod);
+ }
+ EXPORT_SYMBOL(__do_once_done);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 212e718743018..f1b810ddf2327 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -197,17 +197,17 @@ static bool is_dump_unreclaim_slabs(void)
+  * predictable as possible.  The goal is to return the highest value for the
+  * task consuming the most memory to avoid subsequent oom failures.
+  */
+-unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
++long oom_badness(struct task_struct *p, unsigned long totalpages)
+ {
+       long points;
+       long adj;
+ 
+       if (oom_unkillable_task(p))
+-              return 0;
++              return LONG_MIN;
+ 
+       p = find_lock_task_mm(p);
+       if (!p)
+-              return 0;
++              return LONG_MIN;
+ 
+       /*
+        * Do not even consider tasks which are explicitly marked oom
+@@ -219,7 +219,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned 
long totalpages)
+                       test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
+                       in_vfork(p)) {
+               task_unlock(p);
+-              return 0;
++              return LONG_MIN;
+       }
+ 
+       /*
+@@ -234,11 +234,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned 
long totalpages)
+       adj *= totalpages / 1000;
+       points += adj;
+ 
+-      /*
+-       * Never return 0 for an eligible task regardless of the root bonus and
+-       * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
+-       */
+-      return points > 0 ? points : 1;
++      return points;
+ }
+ 
+ static const char * const oom_constraint_text[] = {
+@@ -311,7 +307,7 @@ static enum oom_constraint constrained_alloc(struct 
oom_control *oc)
+ static int oom_evaluate_task(struct task_struct *task, void *arg)
+ {
+       struct oom_control *oc = arg;
+-      unsigned long points;
++      long points;
+ 
+       if (oom_unkillable_task(task))
+               goto next;
+@@ -337,12 +333,12 @@ static int oom_evaluate_task(struct task_struct *task, 
void *arg)
+        * killed first if it triggers an oom, then select it.
+        */
+       if (oom_task_origin(task)) {
+-              points = ULONG_MAX;
++              points = LONG_MAX;
+               goto select;
+       }
+ 
+       points = oom_badness(task, oc->totalpages);
+-      if (!points || points < oc->chosen_points)
++      if (points == LONG_MIN || points < oc->chosen_points)
+               goto next;
+ 
+ select:
+@@ -366,6 +362,8 @@ abort:
+  */
+ static void select_bad_process(struct oom_control *oc)
+ {
++      oc->chosen_points = LONG_MIN;
++
+       if (is_memcg_oom(oc))
+               mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
+       else {
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 0bad5db23129a..6fbc9cb09dc0e 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2414,6 +2414,7 @@ static int do_setlink(const struct sk_buff *skb,
+               return err;
+ 
+       if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || 
tb[IFLA_TARGET_NETNSID]) {
++              const char *pat = ifname && ifname[0] ? ifname : NULL;
+               struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
+                                                           tb, CAP_NET_ADMIN);
+               if (IS_ERR(net)) {
+@@ -2421,7 +2422,7 @@ static int do_setlink(const struct sk_buff *skb,
+                       goto errout;
+               }
+ 
+-              err = dev_change_net_namespace(dev, net, ifname);
++              err = dev_change_net_namespace(dev, net, pat);
+               put_net(net);
+               if (err)
+                       goto errout;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index fedad3a3e61b8..fd8298b8b1c52 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -446,6 +446,8 @@ static void __gre_xmit(struct sk_buff *skb, struct 
net_device *dev,
+ 
+ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
+ {
++      if (csum && skb_checksum_start(skb) < skb->data)
++              return -EINVAL;
+       return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : 
SKB_GSO_GRE);
+ }
+ 
+diff --git a/net/netfilter/nf_conntrack_core.c 
b/net/netfilter/nf_conntrack_core.c
+index 4a988ce4264cb..4bcc36e4b2ef0 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+ 
+ struct conntrack_gc_work {
+       struct delayed_work     dwork;
+-      u32                     last_bucket;
++      u32                     next_bucket;
+       bool                    exiting;
+       bool                    early_drop;
+-      long                    next_gc_run;
+ };
+ 
+ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
+ static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
+ static __read_mostly bool nf_conntrack_locks_all;
+ 
+-/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
+-#define GC_MAX_BUCKETS_DIV    128u
+-/* upper bound of full table scan */
+-#define GC_MAX_SCAN_JIFFIES   (16u * HZ)
+-/* desired ratio of entries found to be expired */
+-#define GC_EVICT_RATIO        50u
++#define GC_SCAN_INTERVAL      (120u * HZ)
++#define GC_SCAN_MAX_DURATION  msecs_to_jiffies(10)
+ 
+ static struct conntrack_gc_work conntrack_gc_work;
+ 
+@@ -1226,17 +1221,13 @@ static void nf_ct_offload_timeout(struct nf_conn *ct)
+ 
+ static void gc_worker(struct work_struct *work)
+ {
+-      unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
+-      unsigned int i, goal, buckets = 0, expired_count = 0;
+-      unsigned int nf_conntrack_max95 = 0;
++      unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
++      unsigned int i, hashsz, nf_conntrack_max95 = 0;
++      unsigned long next_run = GC_SCAN_INTERVAL;
+       struct conntrack_gc_work *gc_work;
+-      unsigned int ratio, scanned = 0;
+-      unsigned long next_run;
+-
+       gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+ 
+-      goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
+-      i = gc_work->last_bucket;
++      i = gc_work->next_bucket;
+       if (gc_work->early_drop)
+               nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+ 
+@@ -1244,22 +1235,21 @@ static void gc_worker(struct work_struct *work)
+               struct nf_conntrack_tuple_hash *h;
+               struct hlist_nulls_head *ct_hash;
+               struct hlist_nulls_node *n;
+-              unsigned int hashsz;
+               struct nf_conn *tmp;
+ 
+-              i++;
+               rcu_read_lock();
+ 
+               nf_conntrack_get_ht(&ct_hash, &hashsz);
+-              if (i >= hashsz)
+-                      i = 0;
++              if (i >= hashsz) {
++                      rcu_read_unlock();
++                      break;
++              }
+ 
+               hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+                       struct net *net;
+ 
+                       tmp = nf_ct_tuplehash_to_ctrack(h);
+ 
+-                      scanned++;
+                       if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+                               nf_ct_offload_timeout(tmp);
+                               continue;
+@@ -1267,7 +1257,6 @@ static void gc_worker(struct work_struct *work)
+ 
+                       if (nf_ct_is_expired(tmp)) {
+                               nf_ct_gc_expired(tmp);
+-                              expired_count++;
+                               continue;
+                       }
+ 
+@@ -1299,7 +1288,14 @@ static void gc_worker(struct work_struct *work)
+                */
+               rcu_read_unlock();
+               cond_resched();
+-      } while (++buckets < goal);
++              i++;
++
++              if (time_after(jiffies, end_time) && i < hashsz) {
++                      gc_work->next_bucket = i;
++                      next_run = 0;
++                      break;
++              }
++      } while (i < hashsz);
+ 
+       if (gc_work->exiting)
+               return;
+@@ -1310,40 +1306,17 @@ static void gc_worker(struct work_struct *work)
+        *
+        * This worker is only here to reap expired entries when system went
+        * idle after a busy period.
+-       *
+-       * The heuristics below are supposed to balance conflicting goals:
+-       *
+-       * 1. Minimize time until we notice a stale entry
+-       * 2. Maximize scan intervals to not waste cycles
+-       *
+-       * Normally, expire ratio will be close to 0.
+-       *
+-       * As soon as a sizeable fraction of the entries have expired
+-       * increase scan frequency.
+        */
+-      ratio = scanned ? expired_count * 100 / scanned : 0;
+-      if (ratio > GC_EVICT_RATIO) {
+-              gc_work->next_gc_run = min_interval;
+-      } else {
+-              unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
+-
+-              BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+-
+-              gc_work->next_gc_run += min_interval;
+-              if (gc_work->next_gc_run > max)
+-                      gc_work->next_gc_run = max;
++      if (next_run) {
++              gc_work->early_drop = false;
++              gc_work->next_bucket = 0;
+       }
+-
+-      next_run = gc_work->next_gc_run;
+-      gc_work->last_bucket = i;
+-      gc_work->early_drop = false;
+       queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, 
next_run);
+ }
+ 
+ static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+ {
+       INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+-      gc_work->next_gc_run = HZ;
+       gc_work->exiting = false;
+ }
+ 
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index faea2ce125110..b97a786d048cc 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -314,7 +314,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+               goto err;
+       }
+ 
+-      if (len != ALIGN(size, 4) + hdrlen)
++      if (!size || len != ALIGN(size, 4) + hdrlen)
+               goto err;
+ 
+       if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)
+diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
+index 06ecf9d2d4bf1..ef6acd7211180 100644
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+               cpu_relax();
+       }
+ 
+-      ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
++      ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
+                               &off, PAGE_SIZE);
+-      if (unlikely(ret != ibmr->sg_len))
++      if (unlikely(ret != ibmr->sg_dma_len))
+               return ret < 0 ? ret : -EINVAL;
+ 
+       if (cmpxchg(&frmr->fr_state,
+diff --git a/net/socket.c b/net/socket.c
+index b14917dd811ad..94358566c9d10 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1053,7 +1053,7 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+               rtnl_unlock();
+               if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
+                       err = -EFAULT;
+-      } else {
++      } else if (is_socket_ioctl_cmd(cmd)) {
+               struct ifreq ifr;
+               bool need_copyout;
+               if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+@@ -1062,6 +1062,8 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+               if (!err && need_copyout)
+                       if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+                               return -EFAULT;
++      } else {
++              err = -ENOTTY;
+       }
+       return err;
+ }
+@@ -3228,6 +3230,8 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
+       struct ifreq ifreq;
+       u32 data32;
+ 
++      if (!is_socket_ioctl_cmd(cmd))
++              return -ENOTTY;
+       if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
+               return -EFAULT;
+       if (get_user(data32, &u_ifreq32->ifr_data))

Reply via email to