commit:     00a8c482d8731d5743bd512614cd63509309e907
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 17 13:09:21 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Oct 17 13:10:03 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=00a8c482

Linux patch 5.14.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1012_linux-5.14.13.patch | 1320 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1324 insertions(+)

diff --git a/0000_README b/0000_README
index 4456b48..31ed9a4 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1011_linux-5.14.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.14.12
 
+Patch:  1012_linux-5.14.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.14.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.14.13.patch b/1012_linux-5.14.13.patch
new file mode 100644
index 0000000..c94e95e
--- /dev/null
+++ b/1012_linux-5.14.13.patch
@@ -0,0 +1,1320 @@
+diff --git a/Makefile b/Makefile
+index 02cde08f4978e..7bdca9dc0e61b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+ 
+diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
+index 5df6193fc4304..8d741f71377f4 100644
+--- a/arch/arm64/kvm/hyp/nvhe/Makefile
++++ b/arch/arm64/kvm/hyp/nvhe/Makefile
+@@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
+ #    runtime. Because the hypervisor is part of the kernel binary, relocations
+ #    produce a kernel VA. We enumerate relocations targeting hyp at build time
+ #    and convert the kernel VAs at those positions to hyp VAs.
+-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
++$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
+       $(call if_changed,hyprel)
+ 
+ # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
+diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
+index 8f215e79e70e6..cd11eb101eacd 100644
+--- a/arch/m68k/kernel/signal.c
++++ b/arch/m68k/kernel/signal.c
+@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+ 
+       if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+               fpu_version = sc->sc_fpstate[0];
+-              if (CPU_IS_020_OR_030 &&
++              if (CPU_IS_020_OR_030 && !regs->stkadj &&
+                   regs->vector >= (VEC_FPBRUC * 4) &&
+                   regs->vector <= (VEC_FPNAN * 4)) {
+                       /* Clear pending exception in 68882 idle frame */
+@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
+               if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+                       context_size = fpstate[1];
+               fpu_version = fpstate[0];
+-              if (CPU_IS_020_OR_030 &&
++              if (CPU_IS_020_OR_030 && !regs->stkadj &&
+                   regs->vector >= (VEC_FPBRUC * 4) &&
+                   regs->vector <= (VEC_FPNAN * 4)) {
+                       /* Clear pending exception in 68882 idle frame */
+@@ -832,18 +832,24 @@ badframe:
+       return 0;
+ }
+ 
++static inline struct pt_regs *rte_regs(struct pt_regs *regs)
++{
++      return (void *)regs + regs->stkadj;
++}
++
+ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+                            unsigned long mask)
+ {
++      struct pt_regs *tregs = rte_regs(regs);
+       sc->sc_mask = mask;
+       sc->sc_usp = rdusp();
+       sc->sc_d0 = regs->d0;
+       sc->sc_d1 = regs->d1;
+       sc->sc_a0 = regs->a0;
+       sc->sc_a1 = regs->a1;
+-      sc->sc_sr = regs->sr;
+-      sc->sc_pc = regs->pc;
+-      sc->sc_formatvec = regs->format << 12 | regs->vector;
++      sc->sc_sr = tregs->sr;
++      sc->sc_pc = tregs->pc;
++      sc->sc_formatvec = tregs->format << 12 | tregs->vector;
+       save_a5_state(sc, regs);
+       save_fpu_state(sc, regs);
+ }
+@@ -851,6 +857,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
+ {
+       struct switch_stack *sw = (struct switch_stack *)regs - 1;
++      struct pt_regs *tregs = rte_regs(regs);
+       greg_t __user *gregs = uc->uc_mcontext.gregs;
+       int err = 0;
+ 
+@@ -871,9 +878,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
+       err |= __put_user(sw->a5, &gregs[13]);
+       err |= __put_user(sw->a6, &gregs[14]);
+       err |= __put_user(rdusp(), &gregs[15]);
+-      err |= __put_user(regs->pc, &gregs[16]);
+-      err |= __put_user(regs->sr, &gregs[17]);
+-      err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
++      err |= __put_user(tregs->pc, &gregs[16]);
++      err |= __put_user(tregs->sr, &gregs[17]);
++      err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
+       err |= rt_save_fpu_state(uc, regs);
+       return err;
+ }
+@@ -890,13 +897,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+                       struct pt_regs *regs)
+ {
+       struct sigframe __user *frame;
+-      int fsize = frame_extra_sizes(regs->format);
++      struct pt_regs *tregs = rte_regs(regs);
++      int fsize = frame_extra_sizes(tregs->format);
+       struct sigcontext context;
+       int err = 0, sig = ksig->sig;
+ 
+       if (fsize < 0) {
+               pr_debug("setup_frame: Unknown frame format %#x\n",
+-                       regs->format);
++                       tregs->format);
+               return -EFAULT;
+       }
+ 
+@@ -907,7 +915,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 
+       err |= __put_user(sig, &frame->sig);
+ 
+-      err |= __put_user(regs->vector, &frame->code);
++      err |= __put_user(tregs->vector, &frame->code);
+       err |= __put_user(&frame->sc, &frame->psc);
+ 
+       if (_NSIG_WORDS > 1)
+@@ -933,34 +941,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
+ 
+       push_cache ((unsigned long) &frame->retcode);
+ 
+-      /*
+-       * Set up registers for signal handler.  All the state we are about
+-       * to destroy is successfully copied to sigframe.
+-       */
+-      wrusp ((unsigned long) frame);
+-      regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+-      adjustformat(regs);
+-
+       /*
+        * This is subtle; if we build more than one sigframe, all but the
+        * first one will see frame format 0 and have fsize == 0, so we won't
+        * screw stkadj.
+        */
+-      if (fsize)
++      if (fsize) {
+               regs->stkadj = fsize;
+-
+-      /* Prepare to skip over the extra stuff in the exception frame.  */
+-      if (regs->stkadj) {
+-              struct pt_regs *tregs =
+-                      (struct pt_regs *)((ulong)regs + regs->stkadj);
++              tregs = rte_regs(regs);
+               pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+-              /* This must be copied with decreasing addresses to
+-                   handle overlaps.  */
+               tregs->vector = 0;
+               tregs->format = 0;
+-              tregs->pc = regs->pc;
+               tregs->sr = regs->sr;
+       }
++
++      /*
++       * Set up registers for signal handler.  All the state we are about
++       * to destroy is successfully copied to sigframe.
++       */
++      wrusp ((unsigned long) frame);
++      tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
++      adjustformat(regs);
++
+       return 0;
+ }
+ 
+@@ -968,7 +970,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+                          struct pt_regs *regs)
+ {
+       struct rt_sigframe __user *frame;
+-      int fsize = frame_extra_sizes(regs->format);
++      struct pt_regs *tregs = rte_regs(regs);
++      int fsize = frame_extra_sizes(tregs->format);
+       int err = 0, sig = ksig->sig;
+ 
+       if (fsize < 0) {
+@@ -1018,34 +1021,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ 
+       push_cache ((unsigned long) &frame->retcode);
+ 
+-      /*
+-       * Set up registers for signal handler.  All the state we are about
+-       * to destroy is successfully copied to sigframe.
+-       */
+-      wrusp ((unsigned long) frame);
+-      regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+-      adjustformat(regs);
+-
+       /*
+        * This is subtle; if we build more than one sigframe, all but the
+        * first one will see frame format 0 and have fsize == 0, so we won't
+        * screw stkadj.
+        */
+-      if (fsize)
++      if (fsize) {
+               regs->stkadj = fsize;
+-
+-      /* Prepare to skip over the extra stuff in the exception frame.  */
+-      if (regs->stkadj) {
+-              struct pt_regs *tregs =
+-                      (struct pt_regs *)((ulong)regs + regs->stkadj);
++              tregs = rte_regs(regs);
+               pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+-              /* This must be copied with decreasing addresses to
+-                   handle overlaps.  */
+               tregs->vector = 0;
+               tregs->format = 0;
+-              tregs->pc = regs->pc;
+               tregs->sr = regs->sr;
+       }
++
++      /*
++       * Set up registers for signal handler.  All the state we are about
++       * to destroy is successfully copied to sigframe.
++       */
++      wrusp ((unsigned long) frame);
++      tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
++      adjustformat(regs);
+       return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index 4523df2785d63..5b6317bf97511 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -1094,6 +1094,8 @@ static int gmc_v10_0_hw_fini(void *handle)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++      gmc_v10_0_gart_disable(adev);
++
+       if (amdgpu_sriov_vf(adev)) {
+               /* full access mode, so don't touch any GMC register */
+               DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+@@ -1102,7 +1104,6 @@ static int gmc_v10_0_hw_fini(void *handle)
+ 
+       amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+       amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+-      gmc_v10_0_gart_disable(adev);
+ 
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 7eb70d69f7605..f3cd2b3fb4cc0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1764,6 +1764,8 @@ static int gmc_v9_0_hw_fini(void *handle)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++      gmc_v9_0_gart_disable(adev);
++
+       if (amdgpu_sriov_vf(adev)) {
+               /* full access mode, so don't touch any GMC register */
+               DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+@@ -1772,7 +1774,6 @@ static int gmc_v9_0_hw_fini(void *handle)
+ 
+       amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+       amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+-      gmc_v9_0_gart_disable(adev);
+ 
+       return 0;
+ }
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index dc6bd4299c546..87edcd4ce07c2 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -322,12 +322,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
+ 
+ /*
+  * MacBook JIS keyboard has wrong logical maximum
++ * Magic Keyboard JIS has wrong logical maximum
+  */
+ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+               unsigned int *rsize)
+ {
+       struct apple_sc *asc = hid_get_drvdata(hdev);
+ 
++      if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
++              hid_info(hdev,
++                       "fixing up Magic Keyboard JIS report descriptor\n");
++              rdesc[64] = rdesc[70] = 0xe7;
++      }
++
+       if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
+                       rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+               hid_info(hdev,
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 81ba642adcb74..528d94ccd76fe 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4720,6 +4720,12 @@ static const struct wacom_features wacom_features_0x393 =
+       { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
+         INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
+         .touch_max = 10 };
++static const struct wacom_features wacom_features_0x3c6 =
++      { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
++        INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
++static const struct wacom_features wacom_features_0x3c8 =
++      { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
++        INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+ 
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+       { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+@@ -4893,6 +4899,8 @@ const struct hid_device_id wacom_ids[] = {
+       { USB_DEVICE_WACOM(0x37A) },
+       { USB_DEVICE_WACOM(0x37B) },
+       { BT_DEVICE_WACOM(0x393) },
++      { BT_DEVICE_WACOM(0x3c6) },
++      { BT_DEVICE_WACOM(0x3c8) },
+       { USB_DEVICE_WACOM(0x4001) },
+       { USB_DEVICE_WACOM(0x4004) },
+       { USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
+index bb3f7749a0b00..5423466de697a 100644
+--- a/drivers/hwmon/ltc2947-core.c
++++ b/drivers/hwmon/ltc2947-core.c
+@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
+               return ret;
+ 
+       /* check external clock presence */
+-      extclk = devm_clk_get(st->dev, NULL);
+-      if (!IS_ERR(extclk)) {
++      extclk = devm_clk_get_optional(st->dev, NULL);
++      if (IS_ERR(extclk))
++              return dev_err_probe(st->dev, PTR_ERR(extclk),
++                                   "Failed to get external clock\n");
++
++      if (extclk) {
+               unsigned long rate_hz;
+               u8 pre = 0, div, tbctl;
+               u64 aux;
+diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
+index df712ce4b164d..53f7d1418bc90 100644
+--- a/drivers/hwmon/pmbus/ibm-cffps.c
++++ b/drivers/hwmon/pmbus/ibm-cffps.c
+@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
+               cmd = CFFPS_SN_CMD;
+               break;
+       case CFFPS_DEBUGFS_MAX_POWER_OUT:
+-              rc = i2c_smbus_read_word_swapped(psu->client,
+-                                               CFFPS_MAX_POWER_OUT_CMD);
++              if (psu->version == cffps1) {
++                      rc = i2c_smbus_read_word_swapped(psu->client,
++                                      CFFPS_MAX_POWER_OUT_CMD);
++              } else {
++                      rc = i2c_smbus_read_word_data(psu->client,
++                                      CFFPS_MAX_POWER_OUT_CMD);
++              }
++
+               if (rc < 0)
+                       return rc;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
+index 4ab5bf64d353e..df8ff839cc621 100644
+--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
++++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
+@@ -192,6 +192,9 @@ static int bgmac_probe(struct platform_device *pdev)
+       bgmac->dma_dev = &pdev->dev;
+ 
+       ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
++      if (ret == -EPROBE_DEFER)
++              return ret;
++
+       if (ret)
+               dev_warn(&pdev->dev,
+                        "MAC address not present in device tree\n");
+diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
+index 309de38a75304..b0d3f9a2950c0 100644
+--- a/drivers/net/ethernet/sun/Kconfig
++++ b/drivers/net/ethernet/sun/Kconfig
+@@ -73,6 +73,7 @@ config CASSINI
+ config SUNVNET_COMMON
+       tristate "Common routines to support Sun Virtual Networking"
+       depends on SUN_LDOMS
++      depends on INET
+       default m
+ 
+ config SUNVNET
+diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
+index afddf6d60dbe6..9017ede409c9c 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
++++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
+@@ -1496,6 +1496,7 @@ static const struct of_device_id sc7280_pinctrl_of_match[] = {
+ static struct platform_driver sc7280_pinctrl_driver = {
+       .driver = {
+               .name = "sc7280-pinctrl",
++              .pm = &msm_pinctrl_dev_pm_ops,
+               .of_match_table = sc7280_pinctrl_of_match,
+       },
+       .probe = sc7280_pinctrl_probe,
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 2aa8f519aae62..5f1092195d1f6 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2399,7 +2399,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+       }
+ 
+       if (unlikely(logit))
+-              ql_log(ql_log_warn, fcport->vha, 0x5060,
++              ql_log(ql_dbg_io, fcport->vha, 0x5060,
+                  "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x  ox_id=%x\n",
+                  sp->name, sp->handle, comp_status,
+                  fd->transferred_length, le32_to_cpu(sts->residual_len),
+@@ -3246,7 +3246,7 @@ check_scsi_status:
+ 
+ out:
+       if (logit)
+-              ql_log(ql_log_warn, fcport->vha, 0x3022,
++              ql_log(ql_dbg_io, fcport->vha, 0x3022,
+                      "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+                      comp_status, scsi_status, res, vha->host_no,
+                      cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 43e682297fd5f..0a1734f34587d 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+                        void *buf, int bufflen)
+ {
+-      u32 result;
++      int result;
+ 
+       unsigned char cmd[] = {
+               SEND_DIAGNOSTIC,
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index b0deaf4af5a37..13f55f41a902d 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -300,7 +300,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+               }
+               break;
+       default:
+-              pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
++              pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
+       }
+ }
+ 
+@@ -392,7 +392,7 @@ static void virtscsi_handle_event(struct work_struct *work)
+               virtscsi_handle_param_change(vscsi, event);
+               break;
+       default:
+-              pr_err("Unsupport virtio scsi event %x\n", event->event);
++              pr_err("Unsupported virtio scsi event %x\n", event->event);
+       }
+       virtscsi_kick_event(vscsi, event_node);
+ }
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 24e994e75f5ca..8049448476a65 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -733,18 +733,13 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+       void *kaddr;
+       struct ext4_iloc iloc;
+ 
+-      if (unlikely(copied < len)) {
+-              if (!PageUptodate(page)) {
+-                      copied = 0;
+-                      goto out;
+-              }
+-      }
++      if (unlikely(copied < len) && !PageUptodate(page))
++              return 0;
+ 
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret) {
+               ext4_std_error(inode->i_sb, ret);
+-              copied = 0;
+-              goto out;
++              return ret;
+       }
+ 
+       ext4_write_lock_xattr(inode, &no_expand);
+@@ -757,7 +752,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+       (void) ext4_find_inline_data_nolock(inode);
+ 
+       kaddr = kmap_atomic(page);
+-      ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
++      ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+       kunmap_atomic(kaddr);
+       SetPageUptodate(page);
+       /* clear page dirty so that writepages wouldn't work for us. */
+@@ -766,7 +761,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+       ext4_write_unlock_xattr(inode, &no_expand);
+       brelse(iloc.bh);
+       mark_inode_dirty(inode);
+-out:
++
+       return copied;
+ }
+ 
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 73daf9443e5e0..fc6ea56de77c2 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1295,6 +1295,7 @@ static int ext4_write_end(struct file *file,
+                       goto errout;
+               }
+               copied = ret;
++              ret = 0;
+       } else
+               copied = block_write_end(file, mapping, pos,
+                                        len, copied, page, fsdata);
+@@ -1321,13 +1322,14 @@ static int ext4_write_end(struct file *file,
+       if (i_size_changed || inline_data)
+               ret = ext4_mark_inode_dirty(handle, inode);
+ 
++errout:
+       if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
+               /* if we have allocated more blocks and copied
+                * less. We will have blocks allocated outside
+                * inode->i_size. So truncate them
+                */
+               ext4_orphan_add(handle, inode);
+-errout:
++
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+@@ -1410,6 +1412,7 @@ static int ext4_journalled_write_end(struct file *file,
+                       goto errout;
+               }
+               copied = ret;
++              ret = 0;
+       } else if (unlikely(copied < len) && !PageUptodate(page)) {
+               copied = 0;
+               ext4_journalled_zero_new_buffers(handle, page, from, to);
+@@ -1439,6 +1442,7 @@ static int ext4_journalled_write_end(struct file *file,
+                       ret = ret2;
+       }
+ 
++errout:
+       if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
+               /* if we have allocated more blocks and copied
+                * less. We will have blocks allocated outside
+@@ -1446,7 +1450,6 @@ static int ext4_journalled_write_end(struct file *file,
+                */
+               ext4_orphan_add(handle, inode);
+ 
+-errout:
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+@@ -3089,35 +3092,37 @@ static int ext4_da_write_end(struct file *file,
+       end = start + copied - 1;
+ 
+       /*
+-       * generic_write_end() will run mark_inode_dirty() if i_size
+-       * changes.  So let's piggyback the i_disksize mark_inode_dirty
+-       * into that.
++       * Since we are holding inode lock, we are sure i_disksize <=
++       * i_size. We also know that if i_disksize < i_size, there are
++       * delalloc writes pending in the range upto i_size. If the end of
++       * the current write is <= i_size, there's no need to touch
++       * i_disksize since writeback will push i_disksize upto i_size
++       * eventually. If the end of the current write is > i_size and
++       * inside an allocated block (ext4_da_should_update_i_disksize()
++       * check), we need to update i_disksize here as neither
++       * ext4_writepage() nor certain ext4_writepages() paths not
++       * allocating blocks update i_disksize.
++       *
++       * Note that we defer inode dirtying to generic_write_end() /
++       * ext4_da_write_inline_data_end().
+        */
+       new_i_size = pos + copied;
+-      if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
++      if (copied && new_i_size > inode->i_size) {
+               if (ext4_has_inline_data(inode) ||
+-                  ext4_da_should_update_i_disksize(page, end)) {
++                  ext4_da_should_update_i_disksize(page, end))
+                       ext4_update_i_disksize(inode, new_i_size);
+-                      /* We need to mark inode dirty even if
+-                       * new_i_size is less that inode->i_size
+-                       * bu greater than i_disksize.(hint delalloc)
+-                       */
+-                      ret = ext4_mark_inode_dirty(handle, inode);
+-              }
+       }
+ 
+       if (write_mode != CONVERT_INLINE_DATA &&
+           ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+           ext4_has_inline_data(inode))
+-              ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
++              ret = ext4_da_write_inline_data_end(inode, pos, len, copied,
+                                                    page);
+       else
+-              ret2 = generic_write_end(file, mapping, pos, len, copied,
++              ret = generic_write_end(file, mapping, pos, len, copied,
+                                                       page, fsdata);
+ 
+-      copied = ret2;
+-      if (ret2 < 0)
+-              ret = ret2;
++      copied = ret;
+       ret2 = ext4_journal_stop(handle);
+       if (unlikely(ret2 && !ret))
+               ret = ret2;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 675216f7022da..2f79586c1a7c8 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -419,7 +419,6 @@ struct io_ring_ctx {
+               struct wait_queue_head  cq_wait;
+               unsigned                cq_extra;
+               atomic_t                cq_timeouts;
+-              struct fasync_struct    *cq_fasync;
+               unsigned                cq_last_tm_flush;
+       } ____cacheline_aligned_in_smp;
+ 
+@@ -1448,10 +1447,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+               wake_up(&ctx->sq_data->wait);
+       if (io_should_trigger_evfd(ctx))
+               eventfd_signal(ctx->cq_ev_fd, 1);
+-      if (waitqueue_active(&ctx->poll_wait)) {
++      if (waitqueue_active(&ctx->poll_wait))
+               wake_up_interruptible(&ctx->poll_wait);
+-              kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+-      }
+ }
+ 
+ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+@@ -1465,10 +1462,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+       }
+       if (io_should_trigger_evfd(ctx))
+               eventfd_signal(ctx->cq_ev_fd, 1);
+-      if (waitqueue_active(&ctx->poll_wait)) {
++      if (waitqueue_active(&ctx->poll_wait))
+               wake_up_interruptible(&ctx->poll_wait);
+-              kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+-      }
+ }
+ 
+ /* Returns true if there are no backlogged entries after the flush */
+@@ -8779,13 +8774,6 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+       return mask;
+ }
+ 
+-static int io_uring_fasync(int fd, struct file *file, int on)
+-{
+-      struct io_ring_ctx *ctx = file->private_data;
+-
+-      return fasync_helper(fd, file, on, &ctx->cq_fasync);
+-}
+-
+ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
+ {
+       const struct cred *creds;
+@@ -9571,7 +9559,6 @@ static const struct file_operations io_uring_fops = {
+       .mmap_capabilities = io_uring_nommu_mmap_capabilities,
+ #endif
+       .poll           = io_uring_poll,
+-      .fasync         = io_uring_fasync,
+ #ifdef CONFIG_PROC_FS
+       .show_fdinfo    = io_uring_show_fdinfo,
+ #endif
+diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
+index 4f5e59f062846..37dd3fe5b1e98 100644
+--- a/fs/vboxsf/super.c
++++ b/fs/vboxsf/super.c
+@@ -21,10 +21,7 @@
+ 
+ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
+ 
+-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
+-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
+-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
+-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
++static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
+ 
+ static int follow_symlinks;
+ module_param(follow_symlinks, int, 0444);
+@@ -386,12 +383,7 @@ fail_nomem:
+ 
+ static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
+ {
+-      unsigned char *options = data;
+-
+-      if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
+-                     options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
+-                     options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
+-                     options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
++      if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
+              vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
+               return -EINVAL;
+       }
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 2d510ad750edc..4aa52f7a48c16 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -683,7 +683,9 @@ struct perf_event {
+       /*
+        * timestamp shadows the actual context timing but it can
+        * be safely used in NMI interrupt context. It reflects the
+-       * context time as it was when the event was last scheduled in.
++       * context time as it was when the event was last scheduled in,
++       * or when ctx_sched_in failed to schedule the event because we
++       * run out of PMC.
+        *
+        * ctx_time already accounts for ctx->timestamp. Therefore to
+        * compute ctx_time for a sample, simply add perf_clock().
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index f6935787e7e8b..8e10c7accdbcc 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1633,7 +1633,7 @@ extern struct pid *cad_pid;
+ #define tsk_used_math(p)                      ((p)->flags & PF_USED_MATH)
+ #define used_math()                           tsk_used_math(current)
+ 
+-static inline bool is_percpu_thread(void)
++static __always_inline bool is_percpu_thread(void)
+ {
+ #ifdef CONFIG_SMP
+       return (current->flags & PF_NO_SETAFFINITY) &&
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index 6d7b12cba0158..bf79f3a890af2 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -11,6 +11,7 @@
+ #include <uapi/linux/pkt_sched.h>
+ 
+ #define DEFAULT_TX_QUEUE_LEN  1000
++#define STAB_SIZE_LOG_MAX     30
+ 
+ struct qdisc_walker {
+       int     stop;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e5c4aca620c58..22c5b1622c226 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
+       return 0;
+ }
+ 
++static inline bool event_update_userpage(struct perf_event *event)
++{
++      if (likely(!atomic_read(&event->mmap_count)))
++              return false;
++
++      perf_event_update_time(event);
++      perf_set_shadow_time(event, event->ctx);
++      perf_event_update_userpage(event);
++
++      return true;
++}
++
++static inline void group_update_userpage(struct perf_event *group_event)
++{
++      struct perf_event *event;
++
++      if (!event_update_userpage(group_event))
++              return;
++
++      for_each_sibling_event(event, group_event)
++              event_update_userpage(event);
++}
++
+ static int merge_sched_in(struct perf_event *event, void *data)
+ {
+       struct perf_event_context *ctx = event->ctx;
+@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
+       }
+ 
+       if (event->state == PERF_EVENT_STATE_INACTIVE) {
++              *can_add_hw = 0;
+               if (event->attr.pinned) {
+                       perf_cgroup_event_disable(event, ctx);
+                       perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
++              } else {
++                      ctx->rotate_necessary = 1;
++                      perf_mux_hrtimer_restart(cpuctx);
++                      group_update_userpage(event);
+               }
+-
+-              *can_add_hw = 0;
+-              ctx->rotate_necessary = 1;
+-              perf_mux_hrtimer_restart(cpuctx);
+       }
+ 
+       return 0;
+@@ -6311,6 +6335,8 @@ accounting:
+ 
+               ring_buffer_attach(event, rb);
+ 
++              perf_event_update_time(event);
++              perf_set_shadow_time(event, event->ctx);
+               perf_event_init_userpage(event);
+               perf_event_update_userpage(event);
+       } else {
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index de2cf3943b91e..a579ea14a69b6 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
+        * things we don't know, ie. tcp syn flag or ports).  If the
+        * rule is also a fragment-specific rule, non-fragments won't
+        * match it. */
++      acpar.fragoff = 0;
+       acpar.hotdrop = false;
+       acpar.state   = state;
+ 
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index efbefcbac3ac6..7cab1cf09bf1a 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
+       atomic_set(&newtbl->entries,  0);
+       spin_lock_init(&newtbl->gates_lock);
+       spin_lock_init(&newtbl->walk_lock);
+-      rhashtable_init(&newtbl->rhead, &mesh_rht_params);
++      if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
++              kfree(newtbl);
++              return NULL;
++      }
+ 
+       return newtbl;
+ }
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 2563473b5cf16..e023e307c0c3d 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4053,7 +4053,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+               if (!bssid)
+                       return false;
+               if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+-                  ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
++                  ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
++                  !is_valid_ether_addr(hdr->addr2))
+                       return false;
+               if (ieee80211_is_beacon(hdr->frame_control))
+                       return true;
+diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
+index 8e8a65d46345b..acd73f717a088 100644
+--- a/net/netfilter/nf_nat_masquerade.c
++++ b/net/netfilter/nf_nat_masquerade.c
+@@ -9,8 +9,19 @@
+ 
+ #include <net/netfilter/nf_nat_masquerade.h>
+ 
++struct masq_dev_work {
++      struct work_struct work;
++      struct net *net;
++      union nf_inet_addr addr;
++      int ifindex;
++      int (*iter)(struct nf_conn *i, void *data);
++};
++
++#define MAX_MASQ_WORKER_COUNT 16
++
+ static DEFINE_MUTEX(masq_mutex);
+ static unsigned int masq_refcnt __read_mostly;
++static atomic_t masq_worker_count __read_mostly;
+ 
+ unsigned int
+ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
+ 
+-static int device_cmp(struct nf_conn *i, void *ifindex)
++static void iterate_cleanup_work(struct work_struct *work)
++{
++      struct masq_dev_work *w;
++
++      w = container_of(work, struct masq_dev_work, work);
++
++      nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
++
++      put_net(w->net);
++      kfree(w);
++      atomic_dec(&masq_worker_count);
++      module_put(THIS_MODULE);
++}
++
++/* Iterate conntrack table in the background and remove conntrack entries
++ * that use the device/address being removed.
++ *
++ * In case too many work items have been queued already or memory allocation
++ * fails iteration is skipped, conntrack entries will time out eventually.
++ */
++static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
++                               int ifindex,
++                               int (*iter)(struct nf_conn *i, void *data),
++                               gfp_t gfp_flags)
++{
++      struct masq_dev_work *w;
++
++      if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
++              return;
++
++      net = maybe_get_net(net);
++      if (!net)
++              return;
++
++      if (!try_module_get(THIS_MODULE))
++              goto err_module;
++
++      w = kzalloc(sizeof(*w), gfp_flags);
++      if (w) {
++              /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
++              atomic_inc(&masq_worker_count);
++
++              INIT_WORK(&w->work, iterate_cleanup_work);
++              w->ifindex = ifindex;
++              w->net = net;
++              w->iter = iter;
++              if (addr)
++                      w->addr = *addr;
++              schedule_work(&w->work);
++              return;
++      }
++
++      module_put(THIS_MODULE);
++ err_module:
++      put_net(net);
++}
++
++static int device_cmp(struct nf_conn *i, void *arg)
+ {
+       const struct nf_conn_nat *nat = nfct_nat(i);
++      const struct masq_dev_work *w = arg;
+ 
+       if (!nat)
+               return 0;
+-      return nat->masq_index == (int)(long)ifindex;
++      return nat->masq_index == w->ifindex;
+ }
+ 
+ static int masq_device_event(struct notifier_block *this,
+@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
+                * and forget them.
+                */
+ 
+-              nf_ct_iterate_cleanup_net(net, device_cmp,
+-                                        (void *)(long)dev->ifindex, 0, 0);
++              nf_nat_masq_schedule(net, NULL, dev->ifindex,
++                                   device_cmp, GFP_KERNEL);
+       }
+ 
+       return NOTIFY_DONE;
+@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
+ 
+ static int inet_cmp(struct nf_conn *ct, void *ptr)
+ {
+-      struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+-      struct net_device *dev = ifa->ifa_dev->dev;
+       struct nf_conntrack_tuple *tuple;
++      struct masq_dev_work *w = ptr;
+ 
+-      if (!device_cmp(ct, (void *)(long)dev->ifindex))
++      if (!device_cmp(ct, ptr))
+               return 0;
+ 
+       tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ 
+-      return ifa->ifa_address == tuple->dst.u3.ip;
++      return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
+ }
+ 
+ static int masq_inet_event(struct notifier_block *this,
+                          unsigned long event,
+                          void *ptr)
+ {
+-      struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+-      struct net *net = dev_net(idev->dev);
++      const struct in_ifaddr *ifa = ptr;
++      const struct in_device *idev;
++      const struct net_device *dev;
++      union nf_inet_addr addr;
++
++      if (event != NETDEV_DOWN)
++              return NOTIFY_DONE;
+ 
+       /* The masq_dev_notifier will catch the case of the device going
+        * down.  So if the inetdev is dead and being destroyed we have
+        * no work to do.  Otherwise this is an individual address removal
+        * and we have to perform the flush.
+        */
++      idev = ifa->ifa_dev;
+       if (idev->dead)
+               return NOTIFY_DONE;
+ 
+-      if (event == NETDEV_DOWN)
+-              nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
++      memset(&addr, 0, sizeof(addr));
++
++      addr.ip = ifa->ifa_address;
++
++      dev = idev->dev;
++      nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
++                           inet_cmp, GFP_KERNEL);
+ 
+       return NOTIFY_DONE;
+ }
+@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
+ };
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static atomic_t v6_worker_count __read_mostly;
+-
+ static int
+ nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+                      const struct in6_addr *daddr, unsigned int srcprefs,
+@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ }
+ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
+ 
+-struct masq_dev_work {
+-      struct work_struct work;
+-      struct net *net;
+-      struct in6_addr addr;
+-      int ifindex;
+-};
+-
+-static int inet6_cmp(struct nf_conn *ct, void *work)
+-{
+-      struct masq_dev_work *w = (struct masq_dev_work *)work;
+-      struct nf_conntrack_tuple *tuple;
+-
+-      if (!device_cmp(ct, (void *)(long)w->ifindex))
+-              return 0;
+-
+-      tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+-
+-      return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
+-}
+-
+-static void iterate_cleanup_work(struct work_struct *work)
+-{
+-      struct masq_dev_work *w;
+-
+-      w = container_of(work, struct masq_dev_work, work);
+-
+-      nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
+-
+-      put_net(w->net);
+-      kfree(w);
+-      atomic_dec(&v6_worker_count);
+-      module_put(THIS_MODULE);
+-}
+-
+ /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
+  *
+  * Defer it to the system workqueue.
+@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
+ {
+       struct inet6_ifaddr *ifa = ptr;
+       const struct net_device *dev;
+-      struct masq_dev_work *w;
+-      struct net *net;
++      union nf_inet_addr addr;
+ 
+-      if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
++      if (event != NETDEV_DOWN)
+               return NOTIFY_DONE;
+ 
+       dev = ifa->idev->dev;
+-      net = maybe_get_net(dev_net(dev));
+-      if (!net)
+-              return NOTIFY_DONE;
+ 
+-      if (!try_module_get(THIS_MODULE))
+-              goto err_module;
++      memset(&addr, 0, sizeof(addr));
+ 
+-      w = kmalloc(sizeof(*w), GFP_ATOMIC);
+-      if (w) {
+-              atomic_inc(&v6_worker_count);
+-
+-              INIT_WORK(&w->work, iterate_cleanup_work);
+-              w->ifindex = dev->ifindex;
+-              w->net = net;
+-              w->addr = ifa->addr;
+-              schedule_work(&w->work);
++      addr.in6 = ifa->addr;
+ 
+-              return NOTIFY_DONE;
+-      }
+-
+-      module_put(THIS_MODULE);
+- err_module:
+-      put_net(net);
++      nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
++                           GFP_ATOMIC);
+       return NOTIFY_DONE;
+ }
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index f87d07736a140..148edd0e71e32 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -513,6 +513,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+               return stab;
+       }
+ 
++      if (s->size_log > STAB_SIZE_LOG_MAX ||
++          s->cell_log > STAB_SIZE_LOG_MAX) {
++              NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
++              return ERR_PTR(-EINVAL);
++      }
++
+       stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+       if (!stab)
+               return ERR_PTR(-ENOMEM);
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index cb5b5e3a481b9..daf731364695b 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -184,13 +184,16 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
+                       model = val;
+       }
+ 
+-      /*
+-       * Mackie Onyx Satellite with base station has a quirk to report a wrong
+-       * value in 'dbs' field of CIP header against its format information.
+-       */
+-      if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)
++      if (vendor == VENDOR_LOUD) {
++              // Mackie Onyx Satellite with base station has a quirk to report a wrong
++              // value in 'dbs' field of CIP header against its format information.
+               oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;
+ 
++              // OXFW971-based models may transfer events by blocking method.
++              if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))
++                      oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
++      }
++
+       return 0;
+ }
+ 
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 1a867c73a48e0..cb3afc4519cf6 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -860,6 +860,11 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
+                             cpus + *cpu_id, cpu_dai_num,
+                             codecs, codec_num,
+                             NULL, &sdw_ops);
++              /*
++               * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
++               * based on wait_for_completion(), tag them as 'nonatomic'.
++               */
++              dai_links[*be_index].nonatomic = true;
+ 
+               ret = set_codec_init_func(link, dai_links + (*be_index)++,
+                                         playback, group_id);
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 3e4dd4a86363b..59d0d7b2b55c8 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+                       dev_warn(dev, "error: %d failed to prepare DSP for device removal",
device removal",
+                                ret);
+ 
+-              snd_sof_fw_unload(sdev);
+               snd_sof_ipc_free(sdev);
+               snd_sof_free_debug(sdev);
+               snd_sof_free_trace(sdev);
+@@ -394,8 +393,7 @@ int snd_sof_device_remove(struct device *dev)
+               snd_sof_remove(sdev);
+ 
+       /* release firmware */
+-      release_firmware(pdata->fw);
+-      pdata->fw = NULL;
++      snd_sof_fw_unload(sdev);
+ 
+       return 0;
+ }
+diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
+index 2b38a77cd594f..9c3f251a0dd05 100644
+--- a/sound/soc/sof/loader.c
++++ b/sound/soc/sof/loader.c
+@@ -880,5 +880,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
+ void snd_sof_fw_unload(struct snd_sof_dev *sdev)
+ {
+       /* TODO: support module unloading at runtime */
++      release_firmware(sdev->pdata->fw);
++      sdev->pdata->fw = NULL;
+ }
+ EXPORT_SYMBOL(snd_sof_fw_unload);
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 6abfc9d079e7c..fa75b7e72ad1f 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -1020,7 +1020,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+       return 0;
+ }
+ 
+-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
++static int usb_audio_resume(struct usb_interface *intf)
+ {
+       struct snd_usb_audio *chip = usb_get_intfdata(intf);
+       struct snd_usb_stream *as;
+@@ -1046,7 +1046,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+        * we just notify and restart the mixers
+        */
+       list_for_each_entry(mixer, &chip->mixer_list, list) {
+-              err = snd_usb_mixer_resume(mixer, reset_resume);
++              err = snd_usb_mixer_resume(mixer);
+               if (err < 0)
+                       goto err_out;
+       }
+@@ -1066,20 +1066,10 @@ err_out:
+       atomic_dec(&chip->active); /* allow autopm after this point */
+       return err;
+ }
+-
+-static int usb_audio_resume(struct usb_interface *intf)
+-{
+-      return __usb_audio_resume(intf, false);
+-}
+-
+-static int usb_audio_reset_resume(struct usb_interface *intf)
+-{
+-      return __usb_audio_resume(intf, true);
+-}
+ #else
+ #define usb_audio_suspend     NULL
+ #define usb_audio_resume      NULL
+-#define usb_audio_reset_resume        NULL
++#define usb_audio_resume      NULL
+ #endif                /* CONFIG_PM */
+ 
+ static const struct usb_device_id usb_audio_ids [] = {
+@@ -1101,7 +1091,7 @@ static struct usb_driver usb_audio_driver = {
+       .disconnect =   usb_audio_disconnect,
+       .suspend =      usb_audio_suspend,
+       .resume =       usb_audio_resume,
+-      .reset_resume = usb_audio_reset_resume,
++      .reset_resume = usb_audio_resume,
+       .id_table =     usb_audio_ids,
+       .supports_autosuspend = 1,
+ };
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 9b713b4a5ec4c..fa7cf982d39e5 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -3655,33 +3655,16 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
+       return 0;
+ }
+ 
+-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
+-{
+-      int err;
+-
+-      if (list->resume) {
+-              err = list->resume(list);
+-              if (err < 0)
+-                      return err;
+-      }
+-      return restore_mixer_value(list);
+-}
+-
+-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
++int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)
+ {
+       struct usb_mixer_elem_list *list;
+-      usb_mixer_elem_resume_func_t f;
+       int id, err;
+ 
+       /* restore cached mixer values */
+       for (id = 0; id < MAX_ID_ELEMS; id++) {
+               for_each_mixer_elem(list, mixer, id) {
+-                      if (reset_resume)
+-                              f = list->reset_resume;
+-                      else
+-                              f = list->resume;
+-                      if (f) {
+-                              err = f(list);
++                      if (list->resume) {
++                              err = list->resume(list);
+                               if (err < 0)
+                                       return err;
+                       }
+@@ -3702,7 +3685,6 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
+       list->id = unitid;
+       list->dump = snd_usb_mixer_dump_cval;
+ #ifdef CONFIG_PM
+-      list->resume = NULL;
+-      list->reset_resume = default_mixer_reset_resume;
++      list->resume = restore_mixer_value;
+ #endif
+ }
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index ea41e7a1f7bf2..16567912b998e 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -70,7 +70,6 @@ struct usb_mixer_elem_list {
+       bool is_std_info;
+       usb_mixer_elem_dump_func_t dump;
+       usb_mixer_elem_resume_func_t resume;
+-      usb_mixer_elem_resume_func_t reset_resume;
+ };
+ 
+ /* iterate over mixer element list of the given unit id */
+@@ -122,7 +121,7 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ 
+ #ifdef CONFIG_PM
+ int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
+-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
++int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);
+ #endif
+ 
+ int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 0a3cb8fd7d004..4a4d3361ac047 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -151,7 +151,7 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
+               *listp = list;
+       list->mixer = mixer;
+       list->id = id;
+-      list->reset_resume = resume;
++      list->resume = resume;
+       kctl = snd_ctl_new1(knew, list);
+       if (!kctl) {
+               kfree(list);
