commit:     1f9192297c331dac19647519edd7ab6b20858846
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 14 12:14:13 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 14 12:14:13 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f919229

Linux patch 5.10.159

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1158_linux-5.10.159.patch | 3519 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3523 insertions(+)

diff --git a/0000_README b/0000_README
index 48ded094..a20d23b7 100644
--- a/0000_README
+++ b/0000_README
@@ -675,6 +675,10 @@ Patch:  1157_linux-5.10.158.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.158
 
+Patch:  1158_linux-5.10.159.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.159
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1158_linux-5.10.159.patch b/1158_linux-5.10.159.patch
new file mode 100644
index 00000000..8c72d24c
--- /dev/null
+++ b/1158_linux-5.10.159.patch
@@ -0,0 +1,3519 @@
+diff --git a/Makefile b/Makefile
+index f3d1f07b6a6fc..bb9fab281555a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 158
++SUBLEVEL = 159
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
+index 1b1d5963ac550..48ffbfbd06240 100644
+--- a/arch/alpha/kernel/rtc.c
++++ b/arch/alpha/kernel/rtc.c
+@@ -80,7 +80,12 @@ init_rtc_epoch(void)
+ static int
+ alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ {
+-      mc146818_get_time(tm);
++      int ret = mc146818_get_time(tm);
++
++      if (ret < 0) {
++              dev_err_ratelimited(dev, "unable to read current time\n");
++              return ret;
++      }
+ 
+       /* Adjust for non-default epochs.  It's easier to depend on the
+          generic __get_rtc_time and adjust the epoch here than create
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index e4ff47110a960..9e1b0af0aa43f 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -1221,10 +1221,10 @@
+                       clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
+               };
+ 
+-              gpmi: nand-controller@33002000{
++              gpmi: nand-controller@33002000 {
+                       compatible = "fsl,imx7d-gpmi-nand";
+                       #address-cells = <1>;
+-                      #size-cells = <0>;
++                      #size-cells = <1>;
+                       reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
+                       reg-names = "gpmi-nand", "bch";
+                       interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts
+index 2a7e6624efb93..ea23ba98625e7 100644
+--- a/arch/arm/boot/dts/rk3036-evb.dts
++++ b/arch/arm/boot/dts/rk3036-evb.dts
+@@ -31,7 +31,7 @@
+ &i2c1 {
+       status = "okay";
+ 
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
+index b0fef82c0a71b..39b913f8d7018 100644
+--- a/arch/arm/boot/dts/rk3188-radxarock.dts
++++ b/arch/arm/boot/dts/rk3188-radxarock.dts
+@@ -67,7 +67,7 @@
+               #sound-dai-cells = <0>;
+       };
+ 
+-      ir_recv: gpio-ir-receiver {
++      ir_recv: ir-receiver {
+               compatible = "gpio-ir-receiver";
+               gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+               pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
+index b6bde9d12c2be..ddf23748ead4c 100644
+--- a/arch/arm/boot/dts/rk3188.dtsi
++++ b/arch/arm/boot/dts/rk3188.dtsi
+@@ -402,7 +402,7 @@
+                               rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
+                       };
+ 
+-                      lcdc1_rgb24: ldcd1-rgb24 {
++                      lcdc1_rgb24: lcdc1-rgb24 {
+                               rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
+                                               <2 RK_PA1 1 &pcfg_pull_none>,
+                                               <2 RK_PA2 1 &pcfg_pull_none>,
+@@ -630,7 +630,6 @@
+ 
+ &global_timer {
+       interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+-      status = "disabled";
+ };
+ 
+ &local_timer {
+diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
+index be695b8c1f672..8a635c2431274 100644
+--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
++++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
+@@ -54,7 +54,7 @@
+               vin-supply = <&vcc_sys>;
+       };
+ 
+-      hym8563@51 {
++      rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+ 
+diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
+index 7fb582302b326..c560afe3af780 100644
+--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
++++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
+@@ -233,7 +233,7 @@
+               vin-supply = <&vcc_sys>;
+       };
+ 
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
+index cf54d5ffff2f9..fe265a834e8ea 100644
+--- a/arch/arm/boot/dts/rk3288-miqi.dts
++++ b/arch/arm/boot/dts/rk3288-miqi.dts
+@@ -157,7 +157,7 @@
+               vin-supply = <&vcc_sys>;
+       };
+ 
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
+index c4d1d142d8c68..d5ef99ebbddce 100644
+--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
++++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
+@@ -165,7 +165,7 @@
+ };
+ 
+ &i2c0 {
+-      hym8563: hym8563@51 {
++      hym8563: rtc@51 {
+               compatible = "haoyu,hym8563";
+               reg = <0x51>;
+               #clock-cells = <0>;
+diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
+index 859a7477909f1..5edc46a5585cb 100644
+--- a/arch/arm/boot/dts/rk3xxx.dtsi
++++ b/arch/arm/boot/dts/rk3xxx.dtsi
+@@ -111,6 +111,13 @@
+               reg = <0x1013c200 0x20>;
+               interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
+               clocks = <&cru CORE_PERI>;
++              status = "disabled";
++              /* The clock source and the sched_clock provided by the arm_global_timer
++               * on Rockchip rk3066a/rk3188 are quite unstable because their rates
++               * depend on the CPU frequency.
++               * Keep the arm_global_timer disabled in order to have the
++               * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
++               */
+       };
+ 
+       local_timer: local-timer@1013c600 {
+diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
+index fe87397c3d8c6..bdbc1e590891e 100644
+--- a/arch/arm/include/asm/perf_event.h
++++ b/arch/arm/include/asm/perf_event.h
+@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
+ 
+ #define perf_arch_fetch_caller_regs(regs, __ip) { \
+       (regs)->ARM_pc = (__ip); \
+-      (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
++      frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
+       (regs)->ARM_sp = current_stack_pointer; \
+       (regs)->ARM_cpsr = SVC_MODE; \
+ }
+diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
+index d16aba48fa0a4..090011394477f 100644
+--- a/arch/arm/include/asm/pgtable-nommu.h
++++ b/arch/arm/include/asm/pgtable-nommu.h
+@@ -44,12 +44,6 @@
+ 
+ typedef pte_t *pte_addr_t;
+ 
+-/*
+- * ZERO_PAGE is a global shared page that is always zero: used
+- * for zero-mapped memory areas etc..
+- */
+-#define ZERO_PAGE(vaddr)      (virt_to_page(0))
+-
+ /*
+  * Mark the prot value as uncacheable and unbufferable.
+  */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index c02f24400369b..d38d503493cb2 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -10,6 +10,15 @@
+ #include <linux/const.h>
+ #include <asm/proc-fns.h>
+ 
++#ifndef __ASSEMBLY__
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++extern struct page *empty_zero_page;
++#define ZERO_PAGE(vaddr)      (empty_zero_page)
++#endif
++
+ #ifndef CONFIG_MMU
+ 
+ #include <asm-generic/pgtable-nopud.h>
+@@ -156,13 +165,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ #define __S111  __PAGE_SHARED_EXEC
+ 
+ #ifndef __ASSEMBLY__
+-/*
+- * ZERO_PAGE is a global shared page that is always zero: used
+- * for zero-mapped memory areas etc..
+- */
+-extern struct page *empty_zero_page;
+-#define ZERO_PAGE(vaddr)      (empty_zero_page)
+-
+ 
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ 
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index 8b3d7191e2b88..959f057017384 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -26,6 +26,13 @@
+ 
+ unsigned long vectors_base;
+ 
++/*
++ * empty_zero_page is a special page that is used for
++ * zero-initialized data and COW.
++ */
++struct page *empty_zero_page;
++EXPORT_SYMBOL(empty_zero_page);
++
+ #ifdef CONFIG_ARM_MPU
+ struct mpu_rgn_info mpu_rgn_info;
+ #endif
+@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
+  */
+ void __init paging_init(const struct machine_desc *mdesc)
+ {
++      void *zero_page;
++
+       early_trap_init((void *)vectors_base);
+       mpu_setup();
++
++      /* allocate the zero page. */
++      zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++      if (!zero_page)
++              panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
++                    __func__, PAGE_SIZE, PAGE_SIZE);
++
+       bootmem_init();
++
++      empty_zero_page = virt_to_page(zero_page);
++      flush_dcache_page(empty_zero_page);
+ }
+ 
+ /*
+diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+index fbcb9531cc70d..213c0759c4b85 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+@@ -13,7 +13,7 @@
+               stdout-path = "serial2:1500000n8";
+       };
+ 
+-      ir_rx {
++      ir-receiver {
+               compatible = "gpio-ir-receiver";
+               gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
+               pinctrl-names = "default";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+index f121203081b97..64df643391194 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -448,7 +448,6 @@
+ &i2s1 {
+       rockchip,playback-channels = <2>;
+       rockchip,capture-channels = <2>;
+-      status = "okay";
+ };
+ 
+ &i2s2 {
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index 3fbf7081c000c..ff58decfef5e8 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -535,8 +535,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+       if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
+               scb_s->eca |= scb_o->eca & ECA_CEI;
+       /* Epoch Extension */
+-      if (test_kvm_facility(vcpu->kvm, 139))
++      if (test_kvm_facility(vcpu->kvm, 139)) {
+               scb_s->ecd |= scb_o->ecd & ECD_MEF;
++              scb_s->epdx = scb_o->epdx;
++      }
+ 
+       /* etoken */
+       if (test_kvm_facility(vcpu->kvm, 156))
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 4ab7a9757e521..574df24a8e5a4 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -1325,8 +1325,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
+       hpet_rtc_timer_reinit();
+       memset(&curr_time, 0, sizeof(struct rtc_time));
+ 
+-      if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
+-              mc146818_get_time(&curr_time);
++      if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
++              if (unlikely(mc146818_get_time(&curr_time) < 0)) {
++                      pr_err_ratelimited("unable to read current time from RTC\n");
++                      return IRQ_HANDLED;
++              }
++      }
+ 
+       if (hpet_rtc_flags & RTC_UIE &&
+           curr_time.tm_sec != hpet_prev_update_sec) {
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index 94665037f4a35..72b7a92337b18 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -120,7 +120,11 @@ static unsigned int read_magic_time(void)
+       struct rtc_time time;
+       unsigned int val;
+ 
+-      mc146818_get_time(&time);
++      if (mc146818_get_time(&time) < 0) {
++              pr_err("Unable to read current time from RTC\n");
++              return 0;
++      }
++
+       pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
+       val = time.tm_year;                             /* 100 years */
+       if (val > 100)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6efd981979bd3..54001ad5de9f5 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1833,6 +1833,11 @@ static int btusb_setup_csr(struct hci_dev *hdev)
+ 
+       rp = (struct hci_rp_read_local_version *)skb->data;
+ 
++      bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u",
++              le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev),
++              le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver),
++              le16_to_cpu(rp->manufacturer));
++
+       /* Detect a wide host of Chinese controllers that aren't CSR.
+        *
+        * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891
+diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
+index fdcebe59510dd..68d95051dd0e6 100644
+--- a/drivers/gpio/gpio-amd8111.c
++++ b/drivers/gpio/gpio-amd8111.c
+@@ -231,7 +231,10 @@ found:
+               ioport_unmap(gp.pm);
+               goto out;
+       }
++      return 0;
++
+ out:
++      pci_dev_put(pdev);
+       return err;
+ }
+ 
+@@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void)
+ {
+       gpiochip_remove(&gp.chip);
+       ioport_unmap(gp.pm);
++      pci_dev_put(gp.pdev);
+ }
+ 
+ module_init(amd_gpio_init);
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+index 356c7d0bd035f..2c3c743df9505 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+@@ -2609,6 +2609,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+        * if supported. In any case the default RGB888 format is added
+        */
+ 
++      /* Default 8bit RGB fallback */
++      output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
++
+       if (max_bpc >= 16 && info->bpc == 16) {
+               if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+                       output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+@@ -2642,9 +2645,6 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+       if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+               output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ 
+-      /* Default 8bit RGB fallback */
+-      output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+-
+       *num_output_fmts = i;
+ 
+       return output_fmts;
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 1a58481037b3f..77a447a3fb1d1 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -621,9 +621,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
+               &pdata->bridge.encoder->crtc->state->adjusted_mode;
+       u8 hsync_polarity = 0, vsync_polarity = 0;
+ 
+-      if (mode->flags & DRM_MODE_FLAG_PHSYNC)
++      if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               hsync_polarity = CHA_HSYNC_POLARITY;
+-      if (mode->flags & DRM_MODE_FLAG_PVSYNC)
++      if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               vsync_polarity = CHA_VSYNC_POLARITY;
+ 
+       ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index cfacce0418a49..c56656a95cf99 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -563,12 +563,20 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
+ {
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+-      int ret;
+ 
+       WARN_ON(shmem->base.import_attach);
+ 
+-      ret = drm_gem_shmem_get_pages(shmem);
+-      WARN_ON_ONCE(ret != 0);
++      mutex_lock(&shmem->pages_lock);
++
++      /*
++       * We should have already pinned the pages when the buffer was first
++       * mmap'd, vm_open() just grabs an additional reference for the new
++       * mm the vma is getting copied into (ie. on fork()).
++       */
++      if (!WARN_ON_ONCE(!shmem->pages_use_count))
++              shmem->pages_use_count++;
++
++      mutex_unlock(&shmem->pages_lock);
+ 
+       drm_gem_vm_open(vma);
+ }
+@@ -616,10 +624,8 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+       shmem = to_drm_gem_shmem_obj(obj);
+ 
+       ret = drm_gem_shmem_get_pages(shmem);
+-      if (ret) {
+-              drm_gem_vm_close(vma);
++      if (ret)
+               return ret;
+-      }
+ 
+       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index 4bf0f5ec4fc2d..2b6590344468d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -949,6 +949,10 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
+       struct drm_device *dev = dev_priv->dev;
+       int i, ret;
+ 
++      /* Screen objects won't work if GMR's aren't available */
++      if (!dev_priv->has_gmr)
++              return -ENOSYS;
++
+       if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
+               DRM_INFO("Not using screen objects,"
+                        " missing cap SCREEN_OBJECT_2\n");
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 5550c943f9855..eaaf732f06304 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1310,6 +1310,9 @@ static s32 snto32(__u32 value, unsigned n)
+       if (!value || !n)
+               return 0;
+ 
++      if (n > 32)
++              n = 32;
++
+       switch (n) {
+       case 8:  return ((__s8)value);
+       case 16: return ((__s16)value);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 3350a41d7dce1..70a693f8f0343 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -257,6 +257,7 @@
+ #define USB_DEVICE_ID_CH_AXIS_295     0x001c
+ 
+ #define USB_VENDOR_ID_CHERRY          0x046a
++#define USB_DEVICE_ID_CHERRY_MOUSE_000C       0x000c
+ #define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
+ #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR   0x0027
+ 
+@@ -874,6 +875,7 @@
+ #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER        0x02fd
+ #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
+ #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
++#define USB_DEVICE_ID_MS_MOUSE_0783      0x0783
+ 
+ #define USB_VENDOR_ID_MOJO            0x8282
+ #define USB_DEVICE_ID_RETRO_ADAPTER   0x3201
+@@ -1302,6 +1304,7 @@
+ 
+ #define USB_VENDOR_ID_PRIMAX  0x0461
+ #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22       0x4d22
++#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A       0x4e2a
+ #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+ #define USB_DEVICE_ID_PRIMAX_REZEL    0x4e72
+ #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F        0x4d0f
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 5e6a0cef2a06d..e3fcf1353fb3b 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
+               return -ENOMEM;
+ 
+       i = strlen(lbuf);
++
++      if (i == 0) {
++              kfree(lbuf);
++              return -EINVAL;
++      }
++
+       if (lbuf[i-1] == '\n') {
+               if (i == 1) {
+                       kfree(lbuf);
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 4a8014e9a511c..1efde40e51364 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -54,6 +54,7 @@ static const struct hid_device_id hid_quirks[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
++      { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+@@ -122,6 +123,7 @@ static const struct hid_device_id hid_quirks[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
++      { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
+@@ -146,6 +148,7 @@ static const struct hid_device_id hid_quirks[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
++      { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index 72350343a56a6..3bafde87a1257 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -787,7 +787,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+       num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
+       num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
+       memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
++      /*
++       * Set this now to ensure that drivers see the correct q->memory value
++       * in the queue_setup op.
++       */
++      mutex_lock(&q->mmap_lock);
+       q->memory = memory;
++      mutex_unlock(&q->mmap_lock);
+ 
+       /*
+        * Ask the driver how many buffers and planes per buffer it requires.
+@@ -796,22 +802,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+       ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
+                      plane_sizes, q->alloc_devs);
+       if (ret)
+-              return ret;
++              goto error;
+ 
+       /* Check that driver has set sane values */
+-      if (WARN_ON(!num_planes))
+-              return -EINVAL;
++      if (WARN_ON(!num_planes)) {
++              ret = -EINVAL;
++              goto error;
++      }
+ 
+       for (i = 0; i < num_planes; i++)
+-              if (WARN_ON(!plane_sizes[i]))
+-                      return -EINVAL;
++              if (WARN_ON(!plane_sizes[i])) {
++                      ret = -EINVAL;
++                      goto error;
++              }
+ 
+       /* Finally, allocate buffers and video memory */
+       allocated_buffers =
+               __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
+       if (allocated_buffers == 0) {
+               dprintk(q, 1, "memory allocation failed\n");
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto error;
+       }
+ 
+       /*
+@@ -852,7 +863,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+       if (ret < 0) {
+               /*
+                * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+-               * from q->num_buffers.
++               * from q->num_buffers and it will reset q->memory to
++               * VB2_MEMORY_UNKNOWN.
+                */
+               __vb2_queue_free(q, allocated_buffers);
+               mutex_unlock(&q->mmap_lock);
+@@ -868,6 +880,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+       q->waiting_for_buffers = !q->is_output;
+ 
+       return 0;
++
++error:
++      mutex_lock(&q->mmap_lock);
++      q->memory = VB2_MEMORY_UNKNOWN;
++      mutex_unlock(&q->mmap_lock);
++      return ret;
+ }
+ EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
+ 
+@@ -878,6 +896,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+ {
+       unsigned int num_planes = 0, num_buffers, allocated_buffers;
+       unsigned plane_sizes[VB2_MAX_PLANES] = { };
++      bool no_previous_buffers = !q->num_buffers;
+       int ret;
+ 
+       if (q->num_buffers == VB2_MAX_FRAME) {
+@@ -885,13 +904,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+               return -ENOBUFS;
+       }
+ 
+-      if (!q->num_buffers) {
++      if (no_previous_buffers) {
+               if (q->waiting_in_dqbuf && *count) {
+                      dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
+                       return -EBUSY;
+               }
+               memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
++              /*
++               * Set this now to ensure that drivers see the correct q->memory
++               * value in the queue_setup op.
++               */
++              mutex_lock(&q->mmap_lock);
+               q->memory = memory;
++              mutex_unlock(&q->mmap_lock);
+               q->waiting_for_buffers = !q->is_output;
+       } else {
+               if (q->memory != memory) {
+@@ -914,14 +939,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+       ret = call_qop(q, queue_setup, q, &num_buffers,
+                      &num_planes, plane_sizes, q->alloc_devs);
+       if (ret)
+-              return ret;
++              goto error;
+ 
+       /* Finally, allocate buffers and video memory */
+       allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
+                               num_planes, plane_sizes);
+       if (allocated_buffers == 0) {
+               dprintk(q, 1, "memory allocation failed\n");
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto error;
+       }
+ 
+       /*
+@@ -952,7 +978,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+       if (ret < 0) {
+               /*
+                * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+-               * from q->num_buffers.
++               * from q->num_buffers and it will reset q->memory to
++               * VB2_MEMORY_UNKNOWN.
+                */
+               __vb2_queue_free(q, allocated_buffers);
+               mutex_unlock(&q->mmap_lock);
+@@ -967,6 +994,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+       *count = allocated_buffers;
+ 
+       return 0;
++
++error:
++      if (no_previous_buffers) {
++              mutex_lock(&q->mmap_lock);
++              q->memory = VB2_MEMORY_UNKNOWN;
++              mutex_unlock(&q->mmap_lock);
++      }
++      return ret;
+ }
+ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
+ 
+@@ -2120,6 +2155,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
+       struct vb2_buffer *vb;
+       unsigned int buffer, plane;
+ 
++      /*
++       * Sanity checks to ensure the lock is held, MEMORY_MMAP is
++       * used and fileio isn't active.
++       */
++      lockdep_assert_held(&q->mmap_lock);
++
++      if (q->memory != VB2_MEMORY_MMAP) {
++              dprintk(q, 1, "queue is not currently set up for mmap\n");
++              return -EINVAL;
++      }
++
++      if (vb2_fileio_is_active(q)) {
++              dprintk(q, 1, "file io in progress\n");
++              return -EBUSY;
++      }
++
+       /*
+        * Go over all buffers and their planes, comparing the given offset
+        * with an offset assigned to each plane. If a match is found,
+@@ -2219,11 +2270,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+       int ret;
+       unsigned long length;
+ 
+-      if (q->memory != VB2_MEMORY_MMAP) {
+-              dprintk(q, 1, "queue is not currently set up for mmap\n");
+-              return -EINVAL;
+-      }
+-
+       /*
+        * Check memory area access mode.
+        */
+@@ -2245,14 +2291,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ 
+       mutex_lock(&q->mmap_lock);
+ 
+-      if (vb2_fileio_is_active(q)) {
+-              dprintk(q, 1, "mmap: file io in progress\n");
+-              ret = -EBUSY;
+-              goto unlock;
+-      }
+-
+       /*
+-       * Find the plane corresponding to the offset passed by userspace.
++       * Find the plane corresponding to the offset passed by userspace. This
++       * will return an error if not MEMORY_MMAP or file I/O is in progress.
+        */
+       ret = __find_plane_by_offset(q, off, &buffer, &plane);
+       if (ret)
+@@ -2305,22 +2346,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+       void *vaddr;
+       int ret;
+ 
+-      if (q->memory != VB2_MEMORY_MMAP) {
+-              dprintk(q, 1, "queue is not currently set up for mmap\n");
+-              return -EINVAL;
+-      }
++      mutex_lock(&q->mmap_lock);
+ 
+       /*
+-       * Find the plane corresponding to the offset passed by userspace.
++       * Find the plane corresponding to the offset passed by userspace. This
++       * will return an error if not MEMORY_MMAP or file I/O is in progress.
+        */
+       ret = __find_plane_by_offset(q, off, &buffer, &plane);
+       if (ret)
+-              return ret;
++              goto unlock;
+ 
+       vb = q->bufs[buffer];
+ 
+       vaddr = vb2_plane_vaddr(vb, plane);
++      mutex_unlock(&q->mmap_lock);
+       return vaddr ? (unsigned long)vaddr : -EINVAL;
++
++unlock:
++      mutex_unlock(&q->mmap_lock);
++      return ret;
+ }
+ EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
+ #endif
+diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
+index 003c32fed3f75..942d0005c55e8 100644
+--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
+@@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+       const struct v4l2_bt_timings *bt = &t->bt;
+       const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
+       u32 caps = cap->capabilities;
++      const u32 max_vert = 10240;
++      u32 max_hor = 3 * bt->width;
+ 
+       if (t->type != V4L2_DV_BT_656_1120)
+               return false;
+@@ -166,14 +168,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+       if (!bt->interlaced &&
+           (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+               return false;
+-      if (bt->hfrontporch > 2 * bt->width ||
+-          bt->hsync > 1024 || bt->hbackporch > 1024)
++      /*
++       * Some video receivers cannot properly separate the frontporch,
++       * backporch and sync values, and instead they only have the total
++       * blanking. That can be assigned to any of these three fields.
++       * So just check that none of these are way out of range.
++       */
++      if (bt->hfrontporch > max_hor ||
++          bt->hsync > max_hor || bt->hbackporch > max_hor)
+               return false;
+-      if (bt->vfrontporch > 4096 ||
+-          bt->vsync > 128 || bt->vbackporch > 4096)
++      if (bt->vfrontporch > max_vert ||
++          bt->vsync > max_vert || bt->vbackporch > max_vert)
+               return false;
+-      if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
+-          bt->il_vsync > 128 || bt->il_vbackporch > 4096))
++      if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
++          bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
+               return false;
+       return fnc == NULL || fnc(t, fnc_handle);
+ }
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index 8847942a8d97e..73c5343e609bc 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -227,6 +227,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
+               u8 rxerr = msg->msg.rx.data[2];
+               u8 txerr = msg->msg.rx.data[3];
+ 
++              netdev_dbg(priv->netdev,
++                         "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
++                         msg->msg.rx.dlc, state, ecc, rxerr, txerr);
++
+               skb = alloc_can_err_skb(priv->netdev, &cf);
+               if (skb == NULL) {
+                       stats->rx_dropped++;
+@@ -253,6 +257,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
+                               break;
+                       default:
+                               priv->can.state = CAN_STATE_ERROR_ACTIVE;
++                              txerr = 0;
++                              rxerr = 0;
+                               break;
+                       }
+               } else {
+diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
+index ec2ac91abcfa4..8e3d185c84601 100644
+--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
++++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
+@@ -95,6 +95,8 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
+               if (IS_ERR(region)) {
+                       while (--i >= 0)
+                               dsa_devlink_region_destroy(priv->regions[i]);
++
++                      kfree(priv->regions);
+                       return PTR_ERR(region);
+               }
+ 
+diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
+index f4f50b3a472e1..0d56cb4f5dd9b 100644
+--- a/drivers/net/ethernet/aeroflex/greth.c
++++ b/drivers/net/ethernet/aeroflex/greth.c
+@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
+                       if (dma_mapping_error(greth->dev, dma_addr)) {
+                               if (netif_msg_ifup(greth))
+                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
++                              dev_kfree_skb(skb);
+                               goto cleanup;
+                       }
+                       greth->rx_skbuff[i] = skb;
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index c00f1a7ffc15f..488da767cfdf3 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -2258,7 +2258,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(dev, "Failed to register netdevice\n");
+-              goto err_unregister_interrupts;
++              goto err_destroy_workqueue;
+       }
+ 
+       nic->msg_enable = debug;
+@@ -2267,6 +2267,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+       return 0;
+ 
++err_destroy_workqueue:
++      destroy_workqueue(nic->nicvf_rx_mode_wq);
+ err_unregister_interrupts:
+       nicvf_unregister_interrupts(nic);
+ err_free_netdev:
+diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
+index 57c3bc4f70895..c16dfd8693639 100644
+--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
++++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
+@@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
+               skb->protocol = eth_type_trans(skb, dev);
+               napi_gro_receive(&priv->napi, skb);
+               dev->stats.rx_packets++;
+-              dev->stats.rx_bytes += skb->len;
++              dev->stats.rx_bytes += len;
+ next:
+               pos = (pos + 1) % rxq->num;
+               if (rx_pkts_num >= limit)
+diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+index 8b2bf85039f16..43f3146caf07e 100644
+--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
++++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+@@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
+               skb->protocol = eth_type_trans(skb, dev);
+               napi_gro_receive(&priv->napi, skb);
+               dev->stats.rx_packets++;
+-              dev->stats.rx_bytes += skb->len;
++              dev->stats.rx_bytes += len;
+ next:
+               pos = dma_ring_incr(pos, RX_DESC_NUM);
+       }
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index d0c4de0231120..ae0c9aaab48db 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5937,9 +5937,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+               e1000_tx_queue(tx_ring, tx_flags, count);
+               /* Make sure there is space in the ring for the next send. */
+               e1000_maybe_stop_tx(tx_ring,
+-                                  (MAX_SKB_FRAGS *
++                                  ((MAX_SKB_FRAGS + 1) *
+                                    DIV_ROUND_UP(PAGE_SIZE,
+-                                                adapter->tx_fifo_limit) + 2));
++                                                adapter->tx_fifo_limit) + 4));
+ 
+               if (!netdev_xmit_more() ||
+                   netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 144c4824b5e80..520929f4d535f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -4234,11 +4234,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
+                       return -EOPNOTSUPP;
+ 
+               /* First 4 bytes of L4 header */
+-              if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
+-                      new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
+-              else if (!usr_ip4_spec->l4_4_bytes)
+-                      new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+-              else
++              if (usr_ip4_spec->l4_4_bytes)
+                       return -EOPNOTSUPP;
+ 
+               /* Filtering on Type of Service is not supported. */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index d7ddf9239e512..2c60d2a933308 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -10065,6 +10065,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
+       return 0;
+ }
+ 
++/**
++ * i40e_clean_xps_state - clean xps state for every tx_ring
++ * @vsi: ptr to the VSI
++ **/
++static void i40e_clean_xps_state(struct i40e_vsi *vsi)
++{
++      int i;
++
++      if (vsi->tx_rings)
++              for (i = 0; i < vsi->num_queue_pairs; i++)
++                      if (vsi->tx_rings[i])
++                              clear_bit(__I40E_TX_XPS_INIT_DONE,
++                                        vsi->tx_rings[i]->state);
++}
++
+ /**
+  * i40e_prep_for_reset - prep for the core to reset
+  * @pf: board private structure
+@@ -10096,8 +10111,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
+               rtnl_unlock();
+ 
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+-              if (pf->vsi[v])
++              if (pf->vsi[v]) {
++                      i40e_clean_xps_state(pf->vsi[v]);
+                       pf->vsi[v]->seid = 0;
++              }
+       }
+ 
+       i40e_shutdown_adminq(&pf->hw);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 381b28a087467..bb2a79b70c3ae 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1525,6 +1525,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+       i40e_cleanup_reset_vf(vf);
+ 
+       i40e_flush(hw);
++      usleep_range(20000, 40000);
+       clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
+ 
+       return true;
+@@ -1648,6 +1649,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+       }
+ 
+       i40e_flush(hw);
++      usleep_range(20000, 40000);
+       clear_bit(__I40E_VF_DISABLE, pf->state);
+ 
+       return true;
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index 28baf203459a8..5e3b0a5843a8e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
+                       *data = 1;
+                       return -1;
+               }
++              wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
++              wr32(E1000_EIMS, BIT(0));
+       } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+               shared_int = false;
+               if (request_irq(irq,
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 74e266c0b8e10..f5567d485e91a 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -4140,7 +4140,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
+       /* Use the cpu associated to the rxq when it is online, in all
+        * the other cases, use the cpu 0 which can't be offline.
+        */
+-      if (cpu_online(pp->rxq_def))
++      if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
+               elected_cpu = pp->rxq_def;
+ 
+       max_cpu = num_present_cpus();
+diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
+index 81a8ccca7e5e0..5693784eec5bc 100644
+--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
++++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
+@@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
+               goto err_out;
+ 
+       usleep_range(26, 100);
+-      while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
++      while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+              (mistat & BUSY))
+               cpu_relax();
+ 
+@@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
+               goto err_out;
+ 
+       usleep_range(26, 100);
+-      while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
++      while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+              (mistat & BUSY))
+               cpu_relax();
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index f70d8d1ce3298..1ed74cfb61fc5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
+ 
+       axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
+       axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
+-      axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
+-      axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
+-      axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
+-      axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
++      axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
++      axi->axi_fb = of_property_read_bool(np, "snps,fb");
++      axi->axi_mb = of_property_read_bool(np, "snps,mb");
++      axi->axi_rb =  of_property_read_bool(np, "snps,rb");
+ 
+       if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
+               axi->axi_wr_osr_lmt = 1;
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index fd9f33c833fa3..95ef3b6f98dd3 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -926,7 +926,7 @@ static int ca8210_spi_transfer(
+ 
+       dev_dbg(&spi->dev, "%s called\n", __func__);
+ 
+-      cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
++      cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
+       if (!cas_ctl)
+               return -ENOMEM;
+ 
+diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
+index 4517517215f2b..a8369bfa4050b 100644
+--- a/drivers/net/ieee802154/cc2520.c
++++ b/drivers/net/ieee802154/cc2520.c
+@@ -970,7 +970,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
+ 
+               if (timeout-- <= 0) {
+                       dev_err(&priv->spi->dev, "oscillator start failed!\n");
+-                      return ret;
++                      return -ETIMEDOUT;
+               }
+               udelay(1);
+       } while (!(status & CC2520_STATUS_XOSC32M_STABLE));
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 3e564158c401b..eb029456b5946 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3680,6 +3680,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
+       [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
+       [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
+       [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
++      [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
+ };
+ 
+ static void macsec_free_netdev(struct net_device *dev)
+diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
+index 5a0e5a8a8917b..22f7db87ed21a 100644
+--- a/drivers/net/plip/plip.c
++++ b/drivers/net/plip/plip.c
+@@ -444,12 +444,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
+       }
+       rcv->state = PLIP_PK_DONE;
+       if (rcv->skb) {
+-              kfree_skb(rcv->skb);
++              dev_kfree_skb_irq(rcv->skb);
+               rcv->skb = NULL;
+       }
+       snd->state = PLIP_PK_DONE;
+       if (snd->skb) {
+-              dev_kfree_skb(snd->skb);
++              dev_consume_skb_irq(snd->skb);
+               snd->skb = NULL;
+       }
+       spin_unlock_irq(&nl->lock);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 7313e6e03c125..bce151e3706a0 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1352,6 +1352,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
+       {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
+       {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
++      {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
+ 
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 6678a734cc4d3..43a4bcdd92c1d 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1356,6 +1356,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+       };
+       u32 num_pkts = 0;
+       bool skip_page_frags = false;
++      bool encap_lro = false;
+       struct Vmxnet3_RxCompDesc *rcd;
+       struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+       u16 segCnt = 0, mss = 0;
+@@ -1496,13 +1497,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+                       if (VMXNET3_VERSION_GE_2(adapter) &&
+                           rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+                               struct Vmxnet3_RxCompDescExt *rcdlro;
++                              union Vmxnet3_GenericDesc *gdesc;
++
+                               rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
++                              gdesc = (union Vmxnet3_GenericDesc *)rcd;
+ 
+                               segCnt = rcdlro->segCnt;
+                               WARN_ON_ONCE(segCnt == 0);
+                               mss = rcdlro->mss;
+                               if (unlikely(segCnt <= 1))
+                                       segCnt = 0;
++                              encap_lro = (le32_to_cpu(gdesc->dword[0]) &
++                                      (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
+                       } else {
+                               segCnt = 0;
+                       }
+@@ -1570,7 +1576,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+                       vmxnet3_rx_csum(adapter, skb,
+                                       (union Vmxnet3_GenericDesc *)rcd);
+                       skb->protocol = eth_type_trans(skb, adapter->netdev);
+-                      if (!rcd->tcp ||
++                      if ((!rcd->tcp && !encap_lro) ||
+                           !(adapter->netdev->features & NETIF_F_LRO))
+                               goto not_lro;
+ 
+@@ -1579,7 +1585,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+                                       SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+                               skb_shinfo(skb)->gso_size = mss;
+                               skb_shinfo(skb)->gso_segs = segCnt;
+-                      } else if (segCnt != 0 || skb->len > mtu) {
++                      } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
+                               u32 hlen;
+ 
+                               hlen = vmxnet3_get_hdr_len(adapter, skb,
+@@ -1608,6 +1614,7 @@ not_lro:
+                               napi_gro_receive(&rq->napi, skb);
+ 
+                       ctx->skb = NULL;
++                      encap_lro = false;
+                       num_pkts++;
+               }
+ 
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 6a9178896c909..1ba9749692164 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -48,7 +48,6 @@
+ #include <linux/debugfs.h>
+ 
+ typedef unsigned int pending_ring_idx_t;
+-#define INVALID_PENDING_RING_IDX (~0U)
+ 
+ struct pending_tx_info {
+       struct xen_netif_tx_request req; /* tx request */
+@@ -82,8 +81,6 @@ struct xenvif_rx_meta {
+ /* Discriminate from any valid pending_idx value. */
+ #define INVALID_PENDING_IDX 0xFFFF
+ 
+-#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
+-
+ #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
+ 
+ /* The maximum number of frags is derived from the size of a grant (same
+@@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif);
+ int xenvif_xenbus_init(void);
+ void xenvif_xenbus_fini(void);
+ 
+-int xenvif_schedulable(struct xenvif *vif);
+-
+-int xenvif_queue_stopped(struct xenvif_queue *queue);
+-void xenvif_wake_queue(struct xenvif_queue *queue);
+-
+ /* (Un)Map communication rings. */
+ void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+@@ -394,17 +386,13 @@ int xenvif_dealloc_kthread(void *data);
+ irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+ 
+ bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
+-void xenvif_rx_action(struct xenvif_queue *queue);
+-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
++bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+ 
+ void xenvif_carrier_on(struct xenvif *vif);
+ 
+ /* Callback from stack when TX packet can be released */
+ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+ 
+-/* Unmap a pending page and release it back to the guest */
+-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
+-
+ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
+ {
+       return MAX_PENDING_REQS -
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 7ce9807fc24c5..97cf5bc48902a 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+       wake_up(&queue->dealloc_wq);
+ }
+ 
+-int xenvif_schedulable(struct xenvif *vif)
++static int xenvif_schedulable(struct xenvif *vif)
+ {
+       return netif_running(vif->dev) &&
+               test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+@@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+ 
+-int xenvif_queue_stopped(struct xenvif_queue *queue)
+-{
+-      struct net_device *dev = queue->vif->dev;
+-      unsigned int id = queue->id;
+-      return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+-}
+-
+-void xenvif_wake_queue(struct xenvif_queue *queue)
+-{
+-      struct net_device *dev = queue->vif->dev;
+-      unsigned int id = queue->id;
+-      netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+-}
+-
+ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+                              struct net_device *sb_dev)
+ {
+@@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+               skb_clear_hash(skb);
+ 
+-      xenvif_rx_queue_tail(queue, skb);
++      if (!xenvif_rx_queue_tail(queue, skb))
++              goto drop;
++
+       xenvif_kick_thread(queue);
+ 
+       return NETDEV_TX_OK;
+ 
+  drop:
+       vif->dev->stats.tx_dropped++;
+-      dev_kfree_skb(skb);
++      dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+ }
+ 
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index b0cbc7fead745..f9373a88cf37c 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -112,6 +112,8 @@ static void make_tx_response(struct xenvif_queue *queue,
+                            s8       st);
+ static void push_tx_responses(struct xenvif_queue *queue);
+ 
++static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
++
+ static inline int tx_work_todo(struct xenvif_queue *queue);
+ 
+ static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
+@@ -330,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
+ 
+ 
+ struct xenvif_tx_cb {
+-      u16 pending_idx;
++      u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
++      u8 copy_count;
+ };
+ 
+ #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
++#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
++#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
+ 
+ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
+                                          u16 pending_idx,
+@@ -368,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+       return skb;
+ }
+ 
+-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
+-                                                      struct sk_buff *skb,
+-                                                      struct xen_netif_tx_request *txp,
+-                                                      struct gnttab_map_grant_ref *gop,
+-                                                      unsigned int frag_overflow,
+-                                                      struct sk_buff *nskb)
++static void xenvif_get_requests(struct xenvif_queue *queue,
++                              struct sk_buff *skb,
++                              struct xen_netif_tx_request *first,
++                              struct xen_netif_tx_request *txfrags,
++                              unsigned *copy_ops,
++                              unsigned *map_ops,
++                              unsigned int frag_overflow,
++                              struct sk_buff *nskb,
++                              unsigned int extra_count,
++                              unsigned int data_len)
+ {
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       skb_frag_t *frags = shinfo->frags;
+-      u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+-      int start;
++      u16 pending_idx;
+       pending_ring_idx_t index;
+       unsigned int nr_slots;
++      struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
++      struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
++      struct xen_netif_tx_request *txp = first;
++
++      nr_slots = shinfo->nr_frags + 1;
++
++      copy_count(skb) = 0;
+ 
+-      nr_slots = shinfo->nr_frags;
++      /* Create copy ops for exactly data_len bytes into the skb head. */
++      __skb_put(skb, data_len);
++      while (data_len > 0) {
++              int amount = data_len > txp->size ? txp->size : data_len;
+ 
+-      /* Skip first skb fragment if it is on same page as header fragment. */
+-      start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
++              cop->source.u.ref = txp->gref;
++              cop->source.domid = queue->vif->domid;
++              cop->source.offset = txp->offset;
+ 
+-      for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+-           shinfo->nr_frags++, txp++, gop++) {
++              cop->dest.domid = DOMID_SELF;
++              cop->dest.offset = (offset_in_page(skb->data +
++                                                 skb_headlen(skb) -
++                                                 data_len)) & ~XEN_PAGE_MASK;
++              cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
++                                             - data_len);
++
++              cop->len = amount;
++              cop->flags = GNTCOPY_source_gref;
++
++              index = pending_index(queue->pending_cons);
++              pending_idx = queue->pending_ring[index];
++              callback_param(queue, pending_idx).ctx = NULL;
++              copy_pending_idx(skb, copy_count(skb)) = pending_idx;
++              copy_count(skb)++;
++
++              cop++;
++              data_len -= amount;
++
++              if (amount == txp->size) {
++                      /* The copy op covered the full tx_request */
++
++                      memcpy(&queue->pending_tx_info[pending_idx].req,
++                             txp, sizeof(*txp));
++                      queue->pending_tx_info[pending_idx].extra_count =
++                              (txp == first) ? extra_count : 0;
++
++                      if (txp == first)
++                              txp = txfrags;
++                      else
++                              txp++;
++                      queue->pending_cons++;
++                      nr_slots--;
++              } else {
++                      /* The copy op partially covered the tx_request.
++                       * The remainder will be mapped.
++                       */
++                      txp->offset += amount;
++                      txp->size -= amount;
++              }
++      }
++
++      for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
++           shinfo->nr_frags++, gop++) {
+               index = pending_index(queue->pending_cons++);
+               pending_idx = queue->pending_ring[index];
+-              xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
++              xenvif_tx_create_map_op(queue, pending_idx, txp,
++                                      txp == first ? extra_count : 0, gop);
+               frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
++
++              if (txp == first)
++                      txp = txfrags;
++              else
++                      txp++;
+       }
+ 
+       if (frag_overflow) {
+@@ -413,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
+               skb_shinfo(skb)->frag_list = nskb;
+       }
+ 
+-      return gop;
++      (*copy_ops) = cop - queue->tx_copy_ops;
++      (*map_ops) = gop - queue->tx_map_ops;
+ }
+ 
+ static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
+@@ -449,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+                              struct gnttab_copy **gopp_copy)
+ {
+       struct gnttab_map_grant_ref *gop_map = *gopp_map;
+-      u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
++      u16 pending_idx;
+       /* This always points to the shinfo of the skb being checked, which
+        * could be either the first or the one on the frag_list
+        */
+@@ -460,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+       struct skb_shared_info *first_shinfo = NULL;
+       int nr_frags = shinfo->nr_frags;
+       const bool sharedslot = nr_frags &&
+-                              frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
+-      int i, err;
++                              frag_get_pending_idx(&shinfo->frags[0]) ==
++                                  copy_pending_idx(skb, copy_count(skb) - 1);
++      int i, err = 0;
+ 
+-      /* Check status of header. */
+-      err = (*gopp_copy)->status;
+-      if (unlikely(err)) {
+-              if (net_ratelimit())
+-                      netdev_dbg(queue->vif->dev,
+-                                 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+-                                 (*gopp_copy)->status,
+-                                 pending_idx,
+-                                 (*gopp_copy)->source.u.ref);
+-              /* The first frag might still have this slot mapped */
+-              if (!sharedslot)
+-                      xenvif_idx_release(queue, pending_idx,
+-                                         XEN_NETIF_RSP_ERROR);
++      for (i = 0; i < copy_count(skb); i++) {
++              int newerr;
++
++              /* Check status of header. */
++              pending_idx = copy_pending_idx(skb, i);
++
++              newerr = (*gopp_copy)->status;
++              if (likely(!newerr)) {
++                      /* The first frag might still have this slot mapped */
++                      if (i < copy_count(skb) - 1 || !sharedslot)
++                              xenvif_idx_release(queue, pending_idx,
++                                                 XEN_NETIF_RSP_OKAY);
++              } else {
++                      err = newerr;
++                      if (net_ratelimit())
++                              netdev_dbg(queue->vif->dev,
++                                         "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
++                                         (*gopp_copy)->status,
++                                         pending_idx,
++                                         (*gopp_copy)->source.u.ref);
++                      /* The first frag might still have this slot mapped */
++                      if (i < copy_count(skb) - 1 || !sharedslot)
++                              xenvif_idx_release(queue, pending_idx,
++                                                 XEN_NETIF_RSP_ERROR);
++              }
++              (*gopp_copy)++;
+       }
+-      (*gopp_copy)++;
+ 
+ check_frags:
+       for (i = 0; i < nr_frags; i++, gop_map++) {
+@@ -524,14 +605,6 @@ check_frags:
+               if (err)
+                       continue;
+ 
+-              /* First error: if the header haven't shared a slot with the
+-               * first frag, release it as well.
+-               */
+-              if (!sharedslot)
+-                      xenvif_idx_release(queue,
+-                                         XENVIF_TX_CB(skb)->pending_idx,
+-                                         XEN_NETIF_RSP_OKAY);
+-
+               /* Invalidate preceding fragments of this skb. */
+               for (j = 0; j < i; j++) {
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+@@ -801,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+                                    unsigned *copy_ops,
+                                    unsigned *map_ops)
+ {
+-      struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+       struct sk_buff *skb, *nskb;
+       int ret;
+       unsigned int frag_overflow;
+@@ -883,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+                       continue;
+               }
+ 
++              data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
++                      XEN_NETBACK_TX_COPY_LEN : txreq.size;
++
+               ret = xenvif_count_requests(queue, &txreq, extra_count,
+                                           txfrags, work_to_do);
++
+               if (unlikely(ret < 0))
+                       break;
+ 
+@@ -910,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+               index = pending_index(queue->pending_cons);
+               pending_idx = queue->pending_ring[index];
+ 
+-              data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
+-                          ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
+-                      XEN_NETBACK_TX_COPY_LEN : txreq.size;
++              if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
++                      data_len = txreq.size;
+ 
+               skb = xenvif_alloc_skb(data_len);
+               if (unlikely(skb == NULL)) {
+@@ -923,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+               }
+ 
+               skb_shinfo(skb)->nr_frags = ret;
+-              if (data_len < txreq.size)
+-                      skb_shinfo(skb)->nr_frags++;
+               /* At this point shinfo->nr_frags is in fact the number of
+                * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+                */
+@@ -986,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+                                            type);
+               }
+ 
+-              XENVIF_TX_CB(skb)->pending_idx = pending_idx;
+-
+-              __skb_put(skb, data_len);
+-              queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+-              queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
+-              queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
+-
+-              queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
+-                      virt_to_gfn(skb->data);
+-              queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+-              queue->tx_copy_ops[*copy_ops].dest.offset =
+-                      offset_in_page(skb->data) & ~XEN_PAGE_MASK;
+-
+-              queue->tx_copy_ops[*copy_ops].len = data_len;
+-              queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+-
+-              (*copy_ops)++;
+-
+-              if (data_len < txreq.size) {
+-                      frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+-                                           pending_idx);
+-                      xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+-                                              extra_count, gop);
+-                      gop++;
+-              } else {
+-                      frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+-                                           INVALID_PENDING_IDX);
+-                      memcpy(&queue->pending_tx_info[pending_idx].req,
+-                             &txreq, sizeof(txreq));
+-                      queue->pending_tx_info[pending_idx].extra_count =
+-                              extra_count;
+-              }
+-
+-              queue->pending_cons++;
+-
+-              gop = xenvif_get_requests(queue, skb, txfrags, gop,
+-                                        frag_overflow, nskb);
++              xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
++                                  map_ops, frag_overflow, nskb, extra_count,
++                                  data_len);
+ 
+               __skb_queue_tail(&queue->tx_queue, skb);
+ 
+               queue->tx.req_cons = idx;
+ 
+-              if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
++              if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
+                   (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
+                       break;
+       }
+ 
+-      (*map_ops) = gop - queue->tx_map_ops;
+       return;
+ }
+ 
+@@ -1112,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+       while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
+               struct xen_netif_tx_request *txp;
+               u16 pending_idx;
+-              unsigned data_len;
+ 
+-              pending_idx = XENVIF_TX_CB(skb)->pending_idx;
++              pending_idx = copy_pending_idx(skb, 0);
+               txp = &queue->pending_tx_info[pending_idx].req;
+ 
+               /* Check the remap error code. */
+@@ -1133,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+                       continue;
+               }
+ 
+-              data_len = skb->len;
+-              callback_param(queue, pending_idx).ctx = NULL;
+-              if (data_len < txp->size) {
+-                      /* Append the packet payload as a fragment. */
+-                      txp->offset += data_len;
+-                      txp->size -= data_len;
+-              } else {
+-                      /* Schedule a response immediately. */
+-                      xenvif_idx_release(queue, pending_idx,
+-                                         XEN_NETIF_RSP_OKAY);
+-              }
+-
+               if (txp->flags & XEN_NETTXF_csum_blank)
+                       skb->ip_summed = CHECKSUM_PARTIAL;
+               else if (txp->flags & XEN_NETTXF_data_validated)
+@@ -1330,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
+ /* Called after netfront has transmitted */
+ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ {
+-      unsigned nr_mops, nr_cops = 0;
++      unsigned nr_mops = 0, nr_cops = 0;
+       int work_done, ret;
+ 
+       if (unlikely(!tx_work_todo(queue)))
+@@ -1417,7 +1442,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
+               notify_remote_via_irq(queue->tx_irq);
+ }
+ 
+-void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
++static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
+ {
+       int ret;
+       struct gnttab_unmap_grant_ref tx_unmap_op;
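
The rewritten xenvif_get_requests() above pulls the first data_len bytes
of every packet into the skb's linear area via grant *copy* operations
(so protocol headers can no longer land in an unmapped frag) and maps
only the remainder. The per-slot bookkeeping of that copy phase can be
exercised in isolation; a standalone sketch with illustrative types
(plain C, not kernel code):

    #include <stdio.h>

    struct tx_req { unsigned offset, size; };

    /* Consume data_len bytes from the request list the way the copy
     * loop above does: a fully consumed request advances to the next
     * slot, a partially consumed one is adjusted in place and left
     * for the mapping loop.  Returns the number of slots fully used.
     */
    static int copy_phase(struct tx_req *req, int nreq, unsigned data_len)
    {
            int slot = 0, used = 0;

            while (data_len > 0 && slot < nreq) {
                    unsigned amount =
                            data_len > req[slot].size ? req[slot].size : data_len;

                    data_len -= amount;
                    if (amount == req[slot].size) {          /* full slot */
                            slot++;
                            used++;
                    } else {                /* partial: remainder is mapped */
                            req[slot].offset += amount;
                            req[slot].size -= amount;
                    }
            }
            return used;
    }

    int main(void)
    {
            struct tx_req reqs[] = { { 0, 64 }, { 0, 1000 }, { 0, 4096 } };
            int used = copy_phase(reqs, 3, 128);

            printf("slots fully copied: %d\n", used);         /* 1 */
            printf("first mapped slot: offset=%u size=%u\n",  /* 64, 936 */
                   reqs[used].offset, reqs[used].size);
            return 0;
    }
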
+diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
+index a0335407be423..0ba754ebc5baa 100644
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+       return false;
+ }
+ 
+-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
++bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+ {
+       unsigned long flags;
++      bool ret = true;
+ 
+       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+ 
+@@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+               struct net_device *dev = queue->vif->dev;
+ 
+               netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+-              kfree_skb(skb);
+-              queue->vif->dev->stats.rx_dropped++;
++              ret = false;
+       } else {
+               if (skb_queue_empty(&queue->rx_queue))
+                       xenvif_update_needed_slots(queue, skb);
+@@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+       }
+ 
+       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
++
++      return ret;
+ }
+ 
+ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+@@ -486,7 +488,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
+ 
+ #define RX_BATCH_SIZE 64
+ 
+-void xenvif_rx_action(struct xenvif_queue *queue)
++static void xenvif_rx_action(struct xenvif_queue *queue)
+ {
+       struct sk_buff_head completed_skbs;
+       unsigned int work_done = 0;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 569f3c8e7b756..3d149890fa36e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1868,6 +1868,12 @@ static int netfront_resume(struct xenbus_device *dev)
+       netif_tx_unlock_bh(info->netdev);
+ 
+       xennet_disconnect_backend(info);
++
++      rtnl_lock();
++      if (info->queues)
++              xennet_destroy_queues(info);
++      rtnl_unlock();
++
+       return 0;
+ }
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index d9c78fe85cb38..e162f1dfbafe9 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3092,10 +3092,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+       if (!ctrl->identified) {
+               int i;
+ 
+-              ret = nvme_init_subsystem(ctrl, id);
+-              if (ret)
+-                      goto out_free;
+-
+               /*
+                * Check for quirks.  Quirk can depend on firmware version,
+                * so, in principle, the set of quirks present can change
+@@ -3108,6 +3104,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+                       if (quirk_matches(id, &core_quirks[i]))
+                               ctrl->quirks |= core_quirks[i].quirks;
+               }
++
++              ret = nvme_init_subsystem(ctrl, id);
++              if (ret)
++                      goto out_free;
+       }
+       memcpy(ctrl->subsys->firmware_rev, id->fr,
+              sizeof(ctrl->subsys->firmware_rev));
+diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
+index 75a941fb3c2bd..1b2eee95ad3f9 100644
+--- a/drivers/regulator/slg51000-regulator.c
++++ b/drivers/regulator/slg51000-regulator.c
+@@ -457,6 +457,8 @@ static int slg51000_i2c_probe(struct i2c_client *client)
+               chip->cs_gpiod = cs_gpiod;
+       }
+ 
++      usleep_range(10000, 11000);
++
+       i2c_set_clientdata(client, chip);
+       chip->chip_irq = client->irq;
+       chip->dev = dev;
+diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
+index 7c7e3648ea4bf..f3856750944f4 100644
+--- a/drivers/regulator/twl6030-regulator.c
++++ b/drivers/regulator/twl6030-regulator.c
+@@ -67,6 +67,7 @@ struct twlreg_info {
+ #define TWL6030_CFG_STATE_SLEEP       0x03
+ #define TWL6030_CFG_STATE_GRP_SHIFT   5
+ #define TWL6030_CFG_STATE_APP_SHIFT   2
++#define TWL6030_CFG_STATE_MASK                0x03
+ #define TWL6030_CFG_STATE_APP_MASK    (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+ #define TWL6030_CFG_STATE_APP(v)      (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+                                               TWL6030_CFG_STATE_APP_SHIFT)
+@@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
+               if (grp < 0)
+                       return grp;
+               grp &= P1_GRP_6030;
++              val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
++              val = TWL6030_CFG_STATE_APP(val);
+       } else {
++              val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
++              val &= TWL6030_CFG_STATE_MASK;
+               grp = 1;
+       }
+ 
+-      val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+-      val = TWL6030_CFG_STATE_APP(val);
+-
+       return grp && (val == TWL6030_CFG_STATE_ON);
+ }
+ 
+@@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
+ 
+       val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+ 
+-      switch (TWL6030_CFG_STATE_APP(val)) {
++      if (info->features & TWL6032_SUBCLASS)
++              val &= TWL6030_CFG_STATE_MASK;
++      else
++              val = TWL6030_CFG_STATE_APP(val);
++
++      switch (val) {
+       case TWL6030_CFG_STATE_ON:
+               return REGULATOR_STATUS_NORMAL;
+ 
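
The twl6030-regulator hunks decode VREG_STATE differently per family:
on TWL6030 the state sits in the APP field (bits 2..3, extracted with
TWL6030_CFG_STATE_APP()), while on TWL6032 it sits in the low two bits
(masked with the new TWL6030_CFG_STATE_MASK). A standalone check of the
two decodings, reusing the patch's constants (the 0x04 readout is an
illustrative value):

    #include <stdio.h>

    #define TWL6030_CFG_STATE_ON        0x01
    #define TWL6030_CFG_STATE_APP_SHIFT 2
    #define TWL6030_CFG_STATE_MASK      0x03
    #define TWL6030_CFG_STATE_APP_MASK  (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
    #define TWL6030_CFG_STATE_APP(v)    (((v) & TWL6030_CFG_STATE_APP_MASK) >> \
                                         TWL6030_CFG_STATE_APP_SHIFT)

    int main(void)
    {
            unsigned char reg = 0x04;   /* bits 2..3 = 01, bits 0..1 = 00 */

            /* TWL6030: ON - the state is the APP field */
            printf("TWL6030 on: %d\n",
                   TWL6030_CFG_STATE_APP(reg) == TWL6030_CFG_STATE_ON);
            /* TWL6032: OFF - hence the separate low-bits mask */
            printf("TWL6032 on: %d\n",
                   (reg & TWL6030_CFG_STATE_MASK) == TWL6030_CFG_STATE_ON);
            return 0;
    }
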
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 58c6382a2807c..d4f6c4dd42c47 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
+ 
+ static int cmos_read_time(struct device *dev, struct rtc_time *t)
+ {
++      int ret;
++
+       /*
+        * If pm_trace abused the RTC for storage, set the timespec to 0,
+        * which tells the caller that this RTC value is unusable.
+@@ -229,29 +231,64 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
+       if (!pm_trace_rtc_valid())
+               return -EIO;
+ 
+-      /* REVISIT:  if the clock has a "century" register, use
+-       * that instead of the heuristic in mc146818_get_time().
+-       * That'll make Y3K compatility (year > 2070) easy!
+-       */
+-      mc146818_get_time(t);
++      ret = mc146818_get_time(t);
++      if (ret < 0) {
++              dev_err_ratelimited(dev, "unable to read current time\n");
++              return ret;
++      }
++
+       return 0;
+ }
+ 
+ static int cmos_set_time(struct device *dev, struct rtc_time *t)
+ {
+-      /* REVISIT:  set the "century" register if available
+-       *
+-       * NOTE: this ignores the issue whereby updating the seconds
++      /* NOTE: this ignores the issue whereby updating the seconds
+        * takes effect exactly 500ms after we write the register.
+        * (Also queueing and other delays before we get this far.)
+        */
+       return mc146818_set_time(t);
+ }
+ 
++struct cmos_read_alarm_callback_param {
++      struct cmos_rtc *cmos;
++      struct rtc_time *time;
++      unsigned char   rtc_control;
++};
++
++static void cmos_read_alarm_callback(unsigned char __always_unused seconds,
++                                   void *param_in)
++{
++      struct cmos_read_alarm_callback_param *p =
++              (struct cmos_read_alarm_callback_param *)param_in;
++      struct rtc_time *time = p->time;
++
++      time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
++      time->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
++      time->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
++
++      if (p->cmos->day_alrm) {
++              /* ignore upper bits on readback per ACPI spec */
++              time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f;
++              if (!time->tm_mday)
++                      time->tm_mday = -1;
++
++              if (p->cmos->mon_alrm) {
++                      time->tm_mon = CMOS_READ(p->cmos->mon_alrm);
++                      if (!time->tm_mon)
++                              time->tm_mon = -1;
++              }
++      }
++
++      p->rtc_control = CMOS_READ(RTC_CONTROL);
++}
++
+ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+ {
+       struct cmos_rtc *cmos = dev_get_drvdata(dev);
+-      unsigned char   rtc_control;
++      struct cmos_read_alarm_callback_param p = {
++              .cmos = cmos,
++              .time = &t->time,
++      };
+ 
+       /* This not only a rtc_op, but also called directly */
+       if (!is_valid_irq(cmos->irq))
+@@ -262,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+        * the future.
+        */
+ 
+-      spin_lock_irq(&rtc_lock);
+-      t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
+-      t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM);
+-      t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM);
+-
+-      if (cmos->day_alrm) {
+-              /* ignore upper bits on readback per ACPI spec */
+-              t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f;
+-              if (!t->time.tm_mday)
+-                      t->time.tm_mday = -1;
+-
+-              if (cmos->mon_alrm) {
+-                      t->time.tm_mon = CMOS_READ(cmos->mon_alrm);
+-                      if (!t->time.tm_mon)
+-                              t->time.tm_mon = -1;
+-              }
+-      }
+-
+-      rtc_control = CMOS_READ(RTC_CONTROL);
+-      spin_unlock_irq(&rtc_lock);
++      /* Some Intel chipsets disconnect the alarm registers when the clock
++       * update is in progress - during this time reads return bogus values
++       * and writes may fail silently. See for example "7th Generation Intel®
++       * Processor Family I/O for U/Y Platforms [...] Datasheet", section
++       * 27.7.1
++       *
++       * Use the mc146818_avoid_UIP() function to avoid this.
++       */
++      if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
++              return -EIO;
+ 
+-      if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++      if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+               if (((unsigned)t->time.tm_sec) < 0x60)
+                       t->time.tm_sec = bcd2bin(t->time.tm_sec);
+               else
+@@ -312,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+               }
+       }
+ 
+-      t->enabled = !!(rtc_control & RTC_AIE);
++      t->enabled = !!(p.rtc_control & RTC_AIE);
+       t->pending = 0;
+ 
+       return 0;
+@@ -443,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
+       return 0;
+ }
+ 
++struct cmos_set_alarm_callback_param {
++      struct cmos_rtc *cmos;
++      unsigned char mon, mday, hrs, min, sec;
++      struct rtc_wkalrm *t;
++};
++
++/* Note: this function may be executed by mc146818_avoid_UIP() more then
++ *     once
++ */
++static void cmos_set_alarm_callback(unsigned char __always_unused seconds,
++                                  void *param_in)
++{
++      struct cmos_set_alarm_callback_param *p =
++              (struct cmos_set_alarm_callback_param *)param_in;
++
++      /* next rtc irq must not be from previous alarm setting */
++      cmos_irq_disable(p->cmos, RTC_AIE);
++
++      /* update alarm */
++      CMOS_WRITE(p->hrs, RTC_HOURS_ALARM);
++      CMOS_WRITE(p->min, RTC_MINUTES_ALARM);
++      CMOS_WRITE(p->sec, RTC_SECONDS_ALARM);
++
++      /* the system may support an "enhanced" alarm */
++      if (p->cmos->day_alrm) {
++              CMOS_WRITE(p->mday, p->cmos->day_alrm);
++              if (p->cmos->mon_alrm)
++                      CMOS_WRITE(p->mon, p->cmos->mon_alrm);
++      }
++
++      if (use_hpet_alarm()) {
++              /*
++               * FIXME the HPET alarm glue currently ignores day_alrm
++               * and mon_alrm ...
++               */
++              hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min,
++                                  p->t->time.tm_sec);
++      }
++
++      if (p->t->enabled)
++              cmos_irq_enable(p->cmos, RTC_AIE);
++}
++
+ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ {
+       struct cmos_rtc *cmos = dev_get_drvdata(dev);
+-      unsigned char mon, mday, hrs, min, sec, rtc_control;
++      struct cmos_set_alarm_callback_param p = {
++              .cmos = cmos,
++              .t = t
++      };
++      unsigned char rtc_control;
+       int ret;
+ 
+       /* This not only a rtc_op, but also called directly */
+@@ -457,11 +531,11 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+       if (ret < 0)
+               return ret;
+ 
+-      mon = t->time.tm_mon + 1;
+-      mday = t->time.tm_mday;
+-      hrs = t->time.tm_hour;
+-      min = t->time.tm_min;
+-      sec = t->time.tm_sec;
++      p.mon = t->time.tm_mon + 1;
++      p.mday = t->time.tm_mday;
++      p.hrs = t->time.tm_hour;
++      p.min = t->time.tm_min;
++      p.sec = t->time.tm_sec;
+ 
+       spin_lock_irq(&rtc_lock);
+       rtc_control = CMOS_READ(RTC_CONTROL);
+@@ -469,43 +543,21 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ 
+       if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+               /* Writing 0xff means "don't care" or "match all".  */
+-              mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
+-              mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
+-              hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
+-              min = (min < 60) ? bin2bcd(min) : 0xff;
+-              sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+-      }
+-
+-      spin_lock_irq(&rtc_lock);
+-
+-      /* next rtc irq must not be from previous alarm setting */
+-      cmos_irq_disable(cmos, RTC_AIE);
+-
+-      /* update alarm */
+-      CMOS_WRITE(hrs, RTC_HOURS_ALARM);
+-      CMOS_WRITE(min, RTC_MINUTES_ALARM);
+-      CMOS_WRITE(sec, RTC_SECONDS_ALARM);
+-
+-      /* the system may support an "enhanced" alarm */
+-      if (cmos->day_alrm) {
+-              CMOS_WRITE(mday, cmos->day_alrm);
+-              if (cmos->mon_alrm)
+-                      CMOS_WRITE(mon, cmos->mon_alrm);
++              p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff;
++              p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff;
++              p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff;
++              p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff;
++              p.sec = (p.sec < 60) ? bin2bcd(p.sec) : 0xff;
+       }
+ 
+-      if (use_hpet_alarm()) {
+-              /*
+-               * FIXME the HPET alarm glue currently ignores day_alrm
+-               * and mon_alrm ...
+-               */
+-              hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
+-                                  t->time.tm_sec);
+-      }
+-
+-      if (t->enabled)
+-              cmos_irq_enable(cmos, RTC_AIE);
+-
+-      spin_unlock_irq(&rtc_lock);
++      /*
++       * Some Intel chipsets disconnect the alarm registers when the clock
++       * update is in progress - during this time writes fail silently.
++       *
++       * Use mc146818_avoid_UIP() to avoid this.
++       */
++      if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
++              return -EIO;
+ 
+       cmos->alarm_expires = rtc_tm_to_time64(&t->time);
+ 
+@@ -652,11 +704,10 @@ static struct cmos_rtc   cmos_rtc;
+ 
+ static irqreturn_t cmos_interrupt(int irq, void *p)
+ {
+-      unsigned long   flags;
+       u8              irqstat;
+       u8              rtc_control;
+ 
+-      spin_lock_irqsave(&rtc_lock, flags);
++      spin_lock(&rtc_lock);
+ 
+       /* When the HPET interrupt handler calls us, the interrupt
+        * status is passed as arg1 instead of the irq number.  But
+@@ -690,7 +741,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
+                       hpet_mask_rtc_irq_bit(RTC_AIE);
+               CMOS_READ(RTC_INTR_FLAGS);
+       }
+-      spin_unlock_irqrestore(&rtc_lock, flags);
++      spin_unlock(&rtc_lock);
+ 
+       if (is_intr(irqstat)) {
+               rtc_update_irq(p, 1, irqstat);
+@@ -806,6 +857,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+ 
+       rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
+ 
++      if (!mc146818_does_rtc_work()) {
++              dev_warn(dev, "broken or not accessible\n");
++              retval = -ENXIO;
++              goto cleanup1;
++      }
++
+       spin_lock_irq(&rtc_lock);
+ 
+       if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
+@@ -1054,7 +1111,9 @@ static void cmos_check_wkalrm(struct device *dev)
+        * ACK the rtc irq here
+        */
+       if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
++              local_irq_disable();
+               cmos_interrupt(0, (void *)cmos->rtc);
++              local_irq_enable();
+               return;
+       }
+ 
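
Both alarm paths above now funnel their register sequences through
mc146818_avoid_UIP(), which supplies the locking and the UIP retry; a
callback may therefore run more than once and has to stay idempotent.
A hedged sketch of adapting another register sequence to this shape
(everything except mc146818_avoid_UIP(), CMOS_READ() and RTC_CONTROL
is illustrative):

    struct my_cb_param {
            unsigned char value;
    };

    static void my_callback(unsigned char seconds, void *param_in)
    {
            struct my_cb_param *p = param_in;

            /* Runs under rtc_lock with UIP clear; may be invoked more
             * than once, so only do idempotent register accesses here.
             */
            p->value = CMOS_READ(RTC_CONTROL);
    }

    static int my_read_control(void)
    {
            struct my_cb_param p;

            if (!mc146818_avoid_UIP(my_callback, &p))
                    return -EIO;    /* update window never cleared */
            return p.value;
    }
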
+diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
+index b036ff33fbe61..347655d24b5d3 100644
+--- a/drivers/rtc/rtc-mc146818-lib.c
++++ b/drivers/rtc/rtc-mc146818-lib.c
+@@ -9,40 +9,143 @@
+ #endif
+ 
+ /*
+- * Returns true if a clock update is in progress
++ * Execute a function while the UIP (Update-in-progress) bit of the RTC is
++ * unset.
++ *
++ * Warning: callback may be executed more than once.
+  */
+-static inline unsigned char mc146818_is_updating(void)
++bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++                      void *param)
+ {
+-      unsigned char uip;
++      int i;
+       unsigned long flags;
++      unsigned char seconds;
+ 
+-      spin_lock_irqsave(&rtc_lock, flags);
+-      uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+-      spin_unlock_irqrestore(&rtc_lock, flags);
+-      return uip;
++      for (i = 0; i < 10; i++) {
++              spin_lock_irqsave(&rtc_lock, flags);
++
++              /*
++               * Check whether there is an update in progress during which the
++               * readout is unspecified. The maximum update time is ~2ms. Poll
++               * every msec for completion.
++               *
++               * Store the second value before checking UIP so a long lasting
++               * NMI which happens to hit after the UIP check cannot make
++               * an update cycle invisible.
++               */
++              seconds = CMOS_READ(RTC_SECONDS);
++
++              if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
++                      spin_unlock_irqrestore(&rtc_lock, flags);
++                      mdelay(1);
++                      continue;
++              }
++
++              /* Revalidate the above readout */
++              if (seconds != CMOS_READ(RTC_SECONDS)) {
++                      spin_unlock_irqrestore(&rtc_lock, flags);
++                      continue;
++              }
++
++              if (callback)
++                      callback(seconds, param);
++
++              /*
++               * Check for the UIP bit again. If it is set now then
++               * the above values may contain garbage.
++               */
++              if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
++                      spin_unlock_irqrestore(&rtc_lock, flags);
++                      mdelay(1);
++                      continue;
++              }
++
++              /*
++               * A NMI might have interrupted the above sequence so check
++               * whether the seconds value has changed which indicates that
++               * the NMI took longer than the UIP bit was set. Unlikely, but
++               * possible and there is also virt...
++               */
++              if (seconds != CMOS_READ(RTC_SECONDS)) {
++                      spin_unlock_irqrestore(&rtc_lock, flags);
++                      continue;
++              }
++              spin_unlock_irqrestore(&rtc_lock, flags);
++
++              return true;
++      }
++      return false;
+ }
++EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
+ 
+-unsigned int mc146818_get_time(struct rtc_time *time)
++/*
++ * If the UIP (Update-in-progress) bit of the RTC is set for more than
++ * 10ms, the RTC is apparently broken or not present.
++ */
++bool mc146818_does_rtc_work(void)
++{
++      int i;
++      unsigned char val;
++      unsigned long flags;
++
++      for (i = 0; i < 10; i++) {
++              spin_lock_irqsave(&rtc_lock, flags);
++              val = CMOS_READ(RTC_FREQ_SELECT);
++              spin_unlock_irqrestore(&rtc_lock, flags);
++
++              if ((val & RTC_UIP) == 0)
++                      return true;
++
++              mdelay(1);
++      }
++
++      return false;
++}
++EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
++
++int mc146818_get_time(struct rtc_time *time)
+ {
+       unsigned char ctrl;
+       unsigned long flags;
++      unsigned int iter_count = 0;
+       unsigned char century = 0;
++      bool retry;
+ 
+ #ifdef CONFIG_MACH_DECSTATION
+       unsigned int real_year;
+ #endif
+ 
++again:
++      if (iter_count > 10) {
++              memset(time, 0, sizeof(*time));
++              return -EIO;
++      }
++      iter_count++;
++
++      spin_lock_irqsave(&rtc_lock, flags);
++
+       /*
+-       * read RTC once any update in progress is done. The update
+-       * can take just over 2ms. We wait 20ms. There is no need to
+-       * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+-       * If you need to know *exactly* when a second has started, enable
+-       * periodic update complete interrupts, (via ioctl) and then
+-       * immediately read /dev/rtc which will block until you get the IRQ.
+-       * Once the read clears, read the RTC time (again via ioctl). Easy.
++       * Check whether there is an update in progress during which the
++       * readout is unspecified. The maximum update time is ~2ms. Poll
++       * every msec for completion.
++       *
++       * Store the second value before checking UIP so a long lasting NMI
++       * which happens to hit after the UIP check cannot make an update
++       * cycle invisible.
+        */
+-      if (mc146818_is_updating())
+-              mdelay(20);
++      time->tm_sec = CMOS_READ(RTC_SECONDS);
++
++      if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
++              spin_unlock_irqrestore(&rtc_lock, flags);
++              mdelay(1);
++              goto again;
++      }
++
++      /* Revalidate the above readout */
++      if (time->tm_sec != CMOS_READ(RTC_SECONDS)) {
++              spin_unlock_irqrestore(&rtc_lock, flags);
++              goto again;
++      }
+ 
+       /*
+        * Only the values that we read from the RTC are set. We leave
+@@ -50,8 +153,6 @@ unsigned int mc146818_get_time(struct rtc_time *time)
+        * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
+        * by the RTC when initially set to a non-zero value.
+        */
+-      spin_lock_irqsave(&rtc_lock, flags);
+-      time->tm_sec = CMOS_READ(RTC_SECONDS);
+       time->tm_min = CMOS_READ(RTC_MINUTES);
+       time->tm_hour = CMOS_READ(RTC_HOURS);
+       time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+@@ -66,8 +167,24 @@ unsigned int mc146818_get_time(struct rtc_time *time)
+               century = CMOS_READ(acpi_gbl_FADT.century);
+ #endif
+       ctrl = CMOS_READ(RTC_CONTROL);
++      /*
++       * Check for the UIP bit again. If it is set now then
++       * the above values may contain garbage.
++       */
++      retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
++      /*
++       * A NMI might have interrupted the above sequence so check whether
++       * the seconds value has changed which indicates that the NMI took
++       * longer than the UIP bit was set. Unlikely, but possible and
++       * there is also virt...
++       */
++      retry |= time->tm_sec != CMOS_READ(RTC_SECONDS);
++
+       spin_unlock_irqrestore(&rtc_lock, flags);
+ 
++      if (retry)
++              goto again;
++
+       if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+       {
+               time->tm_sec = bcd2bin(time->tm_sec);
+@@ -95,7 +212,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
+ 
+       time->tm_mon--;
+ 
+-      return RTC_24H;
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(mc146818_get_time);
+ 
+@@ -132,7 +249,6 @@ int mc146818_set_time(struct rtc_time *time)
+       if (yrs > 255)  /* They are unsigned */
+               return -EINVAL;
+ 
+-      spin_lock_irqsave(&rtc_lock, flags);
+ #ifdef CONFIG_MACH_DECSTATION
+       real_yrs = yrs;
+       leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
+@@ -161,16 +277,16 @@ int mc146818_set_time(struct rtc_time *time)
+       /* These limits and adjustments are independent of
+        * whether the chip is in binary mode or not.
+        */
+-      if (yrs > 169) {
+-              spin_unlock_irqrestore(&rtc_lock, flags);
++      if (yrs > 169)
+               return -EINVAL;
+-      }
+ 
+       if (yrs >= 100)
+               yrs -= 100;
+ 
+-      if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+-          || RTC_ALWAYS_BCD) {
++      spin_lock_irqsave(&rtc_lock, flags);
++      save_control = CMOS_READ(RTC_CONTROL);
++      spin_unlock_irqrestore(&rtc_lock, flags);
++      if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+               sec = bin2bcd(sec);
+               min = bin2bcd(min);
+               hrs = bin2bcd(hrs);
+@@ -180,6 +296,7 @@ int mc146818_set_time(struct rtc_time *time)
+               century = bin2bcd(century);
+       }
+ 
++      spin_lock_irqsave(&rtc_lock, flags);
+       save_control = CMOS_READ(RTC_CONTROL);
+       CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+       save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
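
The retry protocol that mc146818_get_time() and mc146818_avoid_UIP()
now share is: sample the seconds register, back off if UIP is set, do
the multi-register work, then re-check UIP and re-check that seconds
did not change (catching an update cycle hidden by a long NMI), with a
bounded number of attempts. A standalone simulation against a fake RTC
(all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Fake RTC: reports "update in progress" on the first two polls. */
    static int polls;
    static bool fake_uip(void)              { return ++polls <= 2; }
    static unsigned char fake_seconds(void) { return 42; }

    static bool read_time(unsigned char *sec)
    {
            for (int i = 0; i < 10; i++) {  /* bounded, like the kernel loop */
                    unsigned char s = fake_seconds(); /* sample before UIP test */

                    if (fake_uip())
                            continue;       /* update in progress: retry (the
                                             * kernel also sleeps ~1ms here) */

                    /* ... read the remaining time registers ... */

                    if (fake_uip())
                            continue;       /* update started mid-read */
                    if (s != fake_seconds())
                            continue;       /* an NMI hid a full update cycle */

                    *sec = s;
                    return true;
            }
            return false;                   /* RTC broken or not present */
    }

    int main(void)
    {
            unsigned char sec;

            if (read_time(&sec))
                    printf("seconds=%u\n", sec);
            else
                    printf("RTC unusable\n");
            return 0;
    }
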
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a9a43d6494782..28a1194f849fc 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -291,7 +291,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+        *
+        * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
+        */
+-      if (dwc->gadget->speed <= USB_SPEED_HIGH) {
++      if (dwc->gadget->speed <= USB_SPEED_HIGH ||
++          DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
+               reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+               if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+                       saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 2618d3beef649..27828435dd4fc 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -609,7 +609,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
+               if (scr_readw(r) != vc->vc_video_erase_char)
+                       break;
+       if (r != q && new_rows >= rows + logo_lines) {
+-              save = kmalloc(array3_size(logo_lines, new_cols, 2),
++              save = kzalloc(array3_size(logo_lines, new_cols, 2),
+                              GFP_KERNEL);
+               if (save) {
+                       int i = cols < new_cols ? cols : new_cols;
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 6b80dee17f49d..4a6ba0997e399 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -5398,6 +5398,7 @@ static int clone_range(struct send_ctx *sctx,
+               u64 ext_len;
+               u64 clone_len;
+               u64 clone_data_offset;
++              bool crossed_src_i_size = false;
+ 
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(clone_root->root, path);
+@@ -5454,8 +5455,10 @@ static int clone_range(struct send_ctx *sctx,
+               if (key.offset >= clone_src_i_size)
+                       break;
+ 
+-              if (key.offset + ext_len > clone_src_i_size)
++              if (key.offset + ext_len > clone_src_i_size) {
+                       ext_len = clone_src_i_size - key.offset;
++                      crossed_src_i_size = true;
++              }
+ 
+               clone_data_offset = btrfs_file_extent_offset(leaf, ei);
+               if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
+@@ -5515,6 +5518,25 @@ static int clone_range(struct send_ctx *sctx,
+                               ret = send_clone(sctx, offset, clone_len,
+                                                clone_root);
+                       }
++              } else if (crossed_src_i_size && clone_len < len) {
++                      /*
++                       * If we are at i_size of the clone source inode and we
++                       * can not clone from it, terminate the loop. This is
++                       * to avoid sending two write operations, one with a
++                       * length matching clone_len and the final one after
++                       * this loop with a length of len - clone_len.
++                       *
++                       * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
++                       * was passed to the send ioctl), this helps avoid
++                       * sending an encoded write for an offset that is not
++                       * sector size aligned, in case the i_size of the source
++                       * inode is not sector size aligned. That will make the
++                       * receiver fallback to decompression of the data and
++                       * writing it using regular buffered IO, therefore while
++                       * not incorrect, it's not optimal due to decompression and
++                       * possible re-compression at the receiver.
++                       */
++                      break;
+               } else {
+                       ret = send_extent_data(sctx, offset, clone_len);
+               }
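
The crossed_src_i_size break matters when the clone source's i_size is
not sector aligned: cloning stops at i_size, and without the break the
tail would go out as a second write at an unaligned offset, which a
receiver of encoded (compressed) writes has to decompress and rewrite.
A worked example with illustrative numbers and a 4096-byte sector:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long ext_len = 16384; /* extent length at key.offset */
            unsigned long long key_off = 0;
            unsigned long long i_size  = 9000;  /* source i_size, unaligned */
            unsigned long long len     = 16384; /* bytes still to produce */
            int crossed = 0;

            if (key_off + ext_len > i_size) {   /* extent crosses i_size */
                    ext_len = i_size - key_off; /* 9000: not sector aligned */
                    crossed = 1;
            }

            unsigned long long clone_len = ext_len < len ? ext_len : len;

            if (crossed && clone_len < len)
                    /* Old flow: clone 9000 bytes, then write the remaining
                     * 7384 from offset 9000.  New flow: break and send all
                     * 16384 bytes as one regular write.
                     */
                    printf("break: send %llu bytes as a single write\n", len);
            return 0;
    }
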
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index a0c4b99d28994..f40c9534f20be 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -205,12 +205,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+ #define tlb_needs_table_invalidate() (true)
+ #endif
+ 
++void tlb_remove_table_sync_one(void);
++
+ #else
+ 
+ #ifdef tlb_needs_table_invalidate
+ #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
+ #endif
+ 
++static inline void tlb_remove_table_sync_one(void) { }
++
+ #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+ 
+ 
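
The header uses a standard kernel idiom: with MMU_GATHER_RCU_TABLE_FREE
the function is declared and defined out of line, otherwise a static
inline no-op stands in, so callers (the khugepaged hunks below) never
need an #ifdef. A minimal standalone illustration of the idiom (the
demo names are made up):

    #include <stdio.h>

    #define HAVE_RCU_TABLE_FREE 1   /* models the CONFIG_ option */

    #if HAVE_RCU_TABLE_FREE
    void table_sync_demo(void);     /* real out-of-line implementation */
    void table_sync_demo(void) { puts("sync with concurrent table walkers"); }
    #else
    static inline void table_sync_demo(void) { }    /* no-op stub */
    #endif

    int main(void)
    {
            table_sync_demo();      /* call site stays unconditional */
            return 0;
    }
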
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 618838c48313c..959b370733f09 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -68,6 +68,7 @@ struct css_task_iter {
+       struct list_head                iters_node;     /* css_set->task_iters */
+ };
+ 
++extern struct file_system_type cgroup_fs_type;
+ extern struct cgroup_root cgrp_dfl_root;
+ extern struct css_set init_css_set;
+ 
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index b9fbb6d4150e2..955b19dc28a82 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -174,8 +174,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ struct page *follow_huge_pd(struct vm_area_struct *vma,
+                           unsigned long address, hugepd_t hpd,
+                           int flags, int pdshift);
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-                              pmd_t *pmd, int flags);
++struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
++                               int flags);
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+                               pud_t *pud, int flags);
+ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+@@ -261,8 +261,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+       return NULL;
+ }
+ 
+-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+-                              unsigned long address, pmd_t *pmd, int flags)
++static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
++                              unsigned long address, int flags)
+ {
+       return NULL;
+ }
+diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
+index 1e02058113944..b0da04fe087bb 100644
+--- a/include/linux/mc146818rtc.h
++++ b/include/linux/mc146818rtc.h
+@@ -125,7 +125,11 @@ struct cmos_rtc_board_info {
+ #define RTC_IO_EXTENT_USED      RTC_IO_EXTENT
+ #endif /* ARCH_RTC_LOCATION */
+ 
+-unsigned int mc146818_get_time(struct rtc_time *time);
++bool mc146818_does_rtc_work(void);
++int mc146818_get_time(struct rtc_time *time);
+ int mc146818_set_time(struct rtc_time *time);
+ 
++bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
++                      void *param);
++
+ #endif /* _MC146818RTC_H */
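
Taken together, the new declarations let a caller probe for a working
clock and read it with clean failure semantics, exactly as the
cmos_do_probe() and cmos_read_time() hunks above do. A hedged sketch
(only the mc146818_* calls are from this header; the wrapper is
illustrative):

    #include <linux/mc146818rtc.h>

    static int my_probe_and_read(struct rtc_time *tm)
    {
            int ret;

            if (!mc146818_does_rtc_work())  /* UIP stuck >10ms: broken/absent */
                    return -ENXIO;

            ret = mc146818_get_time(tm);    /* now returns 0 or a -errno */
            if (ret < 0)
                    return ret;
            return 0;
    }
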
+diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
+index 6e36e854b5124..d8fcc139ac05d 100644
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -169,7 +169,6 @@ extern struct mutex cgroup_mutex;
+ extern spinlock_t css_set_lock;
+ extern struct cgroup_subsys *cgroup_subsys[];
+ extern struct list_head cgroup_roots;
+-extern struct file_system_type cgroup_fs_type;
+ 
+ /* iterate across the hierarchies */
+ #define for_each_root(root)                                           \
+diff --git a/mm/gup.c b/mm/gup.c
+index b47c751df069a..6d5e4fd55d320 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -405,6 +405,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
+       if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+                        (FOLL_PIN | FOLL_GET)))
+               return ERR_PTR(-EINVAL);
++
++      /*
++       * Considering PTE level hugetlb, like continuous-PTE hugetlb on
++       * ARM64 architecture.
++       */
++      if (is_vm_hugetlb_page(vma)) {
++              page = follow_huge_pmd_pte(vma, address, flags);
++              if (page)
++                      return page;
++              return no_page_table(vma, flags);
++      }
++
+ retry:
+       if (unlikely(pmd_bad(*pmd)))
+               return no_page_table(vma, flags);
+@@ -560,7 +572,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+       if (pmd_none(pmdval))
+               return no_page_table(vma, flags);
+       if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
+-              page = follow_huge_pmd(mm, address, pmd, flags);
++              page = follow_huge_pmd_pte(vma, address, flags);
+               if (page)
+                       return page;
+               return no_page_table(vma, flags);
+@@ -2564,7 +2576,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
+               next = pud_addr_end(addr, end);
+               if (unlikely(!pud_present(pud)))
+                       return 0;
+-              if (unlikely(pud_huge(pud))) {
++              if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
+                       if (!gup_huge_pud(pud, pudp, addr, next, flags,
+                                         pages, nr))
+                               return 0;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d8c63d79af206..3499b3803384b 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5585,12 +5585,13 @@ follow_huge_pd(struct vm_area_struct *vma,
+ }
+ 
+ struct page * __weak
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-              pmd_t *pmd, int flags)
++follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
+ {
++      struct hstate *h = hstate_vma(vma);
++      struct mm_struct *mm = vma->vm_mm;
+       struct page *page = NULL;
+       spinlock_t *ptl;
+-      pte_t pte;
++      pte_t *ptep, pte;
+ 
+       /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+       if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+@@ -5598,17 +5599,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+               return NULL;
+ 
+ retry:
+-      ptl = pmd_lockptr(mm, pmd);
+-      spin_lock(ptl);
+-      /*
+-       * make sure that the address range covered by this pmd is not
+-       * unmapped from other threads.
+-       */
+-      if (!pmd_huge(*pmd))
+-              goto out;
+-      pte = huge_ptep_get((pte_t *)pmd);
++      ptep = huge_pte_offset(mm, address, huge_page_size(h));
++      if (!ptep)
++              return NULL;
++
++      ptl = huge_pte_lock(h, mm, ptep);
++      pte = huge_ptep_get(ptep);
+       if (pte_present(pte)) {
+-              page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
++              page = pte_page(pte) +
++                      ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+               /*
+                * try_grab_page() should always succeed here, because: a) we
+                * hold the pmd (ptl) lock, and b) we've just checked that the
+@@ -5624,7 +5623,7 @@ retry:
+       } else {
+               if (is_hugetlb_entry_migration(pte)) {
+                       spin_unlock(ptl);
+-                      __migration_entry_wait(mm, (pte_t *)pmd, ptl);
++                      __migration_entry_wait(mm, ptep, ptl);
+                       goto retry;
+               }
+               /*
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index cf4dceb9682bf..0eb3adf4ff68c 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1154,6 +1154,7 @@ static void collapse_huge_page(struct mm_struct *mm,
+       _pmd = pmdp_collapse_flush(vma, address, pmd);
+       spin_unlock(pmd_ptl);
+       mmu_notifier_invalidate_range_end(&range);
++      tlb_remove_table_sync_one();
+ 
+       spin_lock(pte_ptl);
+       isolated = __collapse_huge_page_isolate(vma, address, pte,
+@@ -1443,6 +1444,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+       spinlock_t *ptl;
+       int count = 0;
+       int i;
++      struct mmu_notifier_range range;
+ 
+       if (!vma || !vma->vm_file ||
+           vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
+@@ -1457,6 +1459,14 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+       if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
+               return;
+ 
++      /*
++       * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
++       * that got written to. Without this, we'd have to also lock the
++       * anon_vma if one exists.
++       */
++      if (vma->anon_vma)
++              return;
++
+       hpage = find_lock_page(vma->vm_file->f_mapping,
+                              linear_page_index(vma, haddr));
+       if (!hpage)
+@@ -1469,6 +1479,19 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+       if (!pmd)
+               goto drop_hpage;
+ 
++      /*
++       * We need to lock the mapping so that from here on, only GUP-fast and
++       * hardware page walks can access the parts of the page tables that
++       * we're operating on.
++       */
++      i_mmap_lock_write(vma->vm_file->f_mapping);
++
++      /*
++       * This spinlock should be unnecessary: Nobody else should be accessing
++       * the page tables under spinlock protection here, only
++       * lockless_pages_from_mm() and the hardware page walker can access page
++       * tables while all the high-level locks are held in write mode.
++       */
+       start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+ 
+       /* step 1: check all mapped PTEs are to the right huge page */
+@@ -1515,12 +1538,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+       }
+ 
+       /* step 4: collapse pmd */
+-      ptl = pmd_lock(vma->vm_mm, pmd);
++      mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
++                              haddr + HPAGE_PMD_SIZE);
++      mmu_notifier_invalidate_range_start(&range);
+       _pmd = pmdp_collapse_flush(vma, haddr, pmd);
+-      spin_unlock(ptl);
+       mm_dec_nr_ptes(mm);
++      tlb_remove_table_sync_one();
++      mmu_notifier_invalidate_range_end(&range);
+       pte_free(mm, pmd_pgtable(_pmd));
+ 
++      i_mmap_unlock_write(vma->vm_file->f_mapping);
++
+ drop_hpage:
+       unlock_page(hpage);
+       put_page(hpage);
+@@ -1528,6 +1556,7 @@ drop_hpage:
+ 
+ abort:
+       pte_unmap_unlock(start_pte, ptl);
++      i_mmap_unlock_write(vma->vm_file->f_mapping);
+       goto drop_hpage;
+ }
+ 
+@@ -1577,7 +1606,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+                * An alternative would be drop the check, but check that page
+                * table is clear before calling pmdp_collapse_flush() under
+                * ptl. It has higher chance to recover THP for the VMA, but
+-               * has higher cost too.
++               * has higher cost too. It would also probably require locking
++               * the anon_vma.
+                */
+               if (vma->anon_vma)
+                       continue;
+@@ -1599,12 +1629,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+                */
+               if (mmap_write_trylock(mm)) {
+                       if (!khugepaged_test_exit(mm)) {
+-                              spinlock_t *ptl = pmd_lock(mm, pmd);
++                              struct mmu_notifier_range range;
++
++                              mmu_notifier_range_init(&range,
++                                                      MMU_NOTIFY_CLEAR, 0,
++                                                      NULL, mm, addr,
++                                                      addr + HPAGE_PMD_SIZE);
++                              mmu_notifier_invalidate_range_start(&range);
+                               /* assume page table is clear */
+                               _pmd = pmdp_collapse_flush(vma, addr, pmd);
+-                              spin_unlock(ptl);
+                               mm_dec_nr_ptes(mm);
++                              tlb_remove_table_sync_one();
+                               pte_free(mm, pmd_pgtable(_pmd));
++                              mmu_notifier_invalidate_range_end(&range);
+                       }
+                       mmap_write_unlock(mm);
+               } else {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 92ab008777183..c62d997c8ca1d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4866,6 +4866,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+       unsigned int efd, cfd;
+       struct fd efile;
+       struct fd cfile;
++      struct dentry *cdentry;
+       const char *name;
+       char *endp;
+       int ret;
+@@ -4916,6 +4917,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+       if (ret < 0)
+               goto out_put_cfile;
+ 
++      /*
++       * The control file must be a regular cgroup1 file. As a regular cgroup
++       * file can't be renamed, it's safe to access its name afterwards.
++       */
++      cdentry = cfile.file->f_path.dentry;
++      if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
++              ret = -EINVAL;
++              goto out_put_cfile;
++      }
++
+       /*
+        * Determine the event callbacks and set them in @event.  This used
+        * to be done via struct cftype but cgroup core no longer knows
+@@ -4924,7 +4935,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+        *
+        * DO NOT ADD NEW FILES.
+        */
+-      name = cfile.file->f_path.dentry->d_name.name;
++      name = cdentry->d_name.name;
+ 
+       if (!strcmp(name, "memory.usage_in_bytes")) {
+               event->register_event = mem_cgroup_usage_register_event;
+@@ -4948,7 +4959,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+        * automatically removed on cgroup destruction but the removal is
+        * asynchronous, so take an extra ref on @css.
+        */
+-      cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
++      cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
+                                              &memory_cgrp_subsys);
+       ret = -EINVAL;
+       if (IS_ERR(cfile_css))
+diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
+index 03c33c93a582b..205fdbb5792a9 100644
+--- a/mm/mmu_gather.c
++++ b/mm/mmu_gather.c
+@@ -139,7 +139,7 @@ static void tlb_remove_table_smp_sync(void *arg)
+       /* Simply deliver the interrupt */
+ }
+ 
+-static void tlb_remove_table_sync_one(void)
++void tlb_remove_table_sync_one(void)
+ {
+       /*
+        * This isn't an RCU grace period and hence the page-tables cannot be
+@@ -163,8 +163,6 @@ static void tlb_remove_table_free(struct mmu_table_batch *batch)
+ 
+ #else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+ 
+-static void tlb_remove_table_sync_one(void) { }
+-
+ static void tlb_remove_table_free(struct mmu_table_batch *batch)
+ {
+       __tlb_remove_table_free(batch);
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index deb66635f0f3b..e070a0b8e5ca3 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -118,7 +118,7 @@ struct p9_conn {
+       struct list_head unsent_req_list;
+       struct p9_req_t *rreq;
+       struct p9_req_t *wreq;
+-      char tmp_buf[7];
++      char tmp_buf[P9_HDRSZ];
+       struct p9_fcall rc;
+       int wpos;
+       int wsize;
+@@ -291,7 +291,7 @@ static void p9_read_work(struct work_struct *work)
+       if (!m->rc.sdata) {
+               m->rc.sdata = m->tmp_buf;
+               m->rc.offset = 0;
+-              m->rc.capacity = 7; /* start by reading header */
++              m->rc.capacity = P9_HDRSZ; /* start by reading header */
+       }
+ 
+       clear_bit(Rpending, &m->wsched);
+@@ -314,7 +314,7 @@ static void p9_read_work(struct work_struct *work)
+               p9_debug(P9_DEBUG_TRANS, "got new header\n");
+ 
+               /* Header size */
+-              m->rc.size = 7;
++              m->rc.size = P9_HDRSZ;
+               err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
+               if (err) {
+                       p9_debug(P9_DEBUG_ERROR,
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index 432ac5a16f2e0..6c8a33f98f093 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -231,6 +231,14 @@ static void p9_xen_response(struct work_struct *work)
+                       continue;
+               }
+ 
++              if (h.size > req->rc.capacity) {
++                      dev_warn(&priv->dev->dev,
++                               "requested packet size too big: %d for tag %d 
with capacity %zd\n",
++                               h.size, h.tag, req->rc.capacity);
++                      req->status = REQ_STATUS_ERROR;
++                      goto recv_error;
++              }
++
+               memcpy(&req->rc, &h, sizeof(h));
+               req->rc.offset = 0;
+ 
+@@ -240,6 +248,7 @@ static void p9_xen_response(struct work_struct *work)
+                                    masked_prod, &masked_cons,
+                                    XEN_9PFS_RING_SIZE(ring));
+ 
++recv_error:
+               virt_mb();
+               cons += h.size;
+               ring->intf->in_cons = cons;
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index cff4944d5b663..7601ce9143c18 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -1010,6 +1010,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
+       hci_dev_lock(hdev);
+       hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
+       hci_dev_unlock(hdev);
++      hci_dev_put(hdev);
+ 
+       if (!hcon)
+               return -ENOENT;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 4ef6a54403aa2..2f87f57e7a4fd 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -736,7 +736,7 @@ static int __init bt_init(void)
+ 
+       err = bt_sysfs_init();
+       if (err < 0)
+-              return err;
++              goto cleanup_led;
+ 
+       err = sock_register(&bt_sock_family_ops);
+       if (err)
+@@ -772,6 +772,8 @@ unregister_socket:
+       sock_unregister(PF_BLUETOOTH);
+ cleanup_sysfs:
+       bt_sysfs_cleanup();
++cleanup_led:
++      bt_leds_cleanup();
+       return err;
+ }
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 866eb22432de2..f8aab38ab5953 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3799,7 +3799,8 @@ int hci_register_dev(struct hci_dev *hdev)
+       hci_sock_dev_event(hdev, HCI_DEV_REG);
+       hci_dev_hold(hdev);
+ 
+-      if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
++      if (!hdev->suspend_notifier.notifier_call &&
++          !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
+               hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
+               error = register_pm_notifier(&hdev->suspend_notifier);
+               if (error)
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index cf554e8555214..79f24c6f43c8c 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -680,7 +680,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ {
+       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ 
+-      if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
++      if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CAN_MTU)) {
+               pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type 
%d, len %d\n",
+                            dev->type, skb->len);
+               goto free_skb;
+@@ -706,7 +706,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ {
+       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ 
+-      if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
++      if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CANFD_MTU)) {
+               pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev 
type %d, len %d\n",
+                            dev->type, skb->len);
+               goto free_skb;
+diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
+index 4820dbcedfa2d..230ddf45dff0d 100644
+--- a/net/dsa/tag_ksz.c
++++ b/net/dsa/tag_ksz.c
+@@ -22,7 +22,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
+       if (!skb->dev)
+               return NULL;
+ 
+-      pskb_trim_rcsum(skb, skb->len - len);
++      if (pskb_trim_rcsum(skb, skb->len - len))
++              return NULL;
+ 
+       skb->offload_fwd_mark = true;
+ 
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index af8a4255cf1ba..5f786ef662ead 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -830,6 +830,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+               return -EINVAL;
+       }
+ 
++      if (!cfg->fc_table)
++              cfg->fc_table = RT_TABLE_MAIN;
++
+       return 0;
+ errout:
+       return err;
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 52ec0c43e6b81..ab9fcc6231b86 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -423,6 +423,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi)
+                   nfi->fib_prefsrc == fi->fib_prefsrc &&
+                   nfi->fib_priority == fi->fib_priority &&
+                   nfi->fib_type == fi->fib_type &&
++                  nfi->fib_tb_id == fi->fib_tb_id &&
+                   memcmp(nfi->fib_metrics, fi->fib_metrics,
+                          sizeof(u32) * RTAX_MAX) == 0 &&
+                   !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 6ab5c50aa7a87..65ead8a749337 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1493,24 +1493,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
+       struct ip_tunnel_parm *p = &t->parms;
+       __be16 o_flags = p->o_flags;
+ 
+-      if (t->erspan_ver <= 2) {
+-              if (t->erspan_ver != 0 && !t->collect_md)
+-                      o_flags |= TUNNEL_KEY;
+-
+-              if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+-                      goto nla_put_failure;
+-
+-              if (t->erspan_ver == 1) {
+-                      if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+-                              goto nla_put_failure;
+-              } else if (t->erspan_ver == 2) {
+-                      if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+-                              goto nla_put_failure;
+-                      if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+-                              goto nla_put_failure;
+-              }
+-      }
+-
+       if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+           nla_put_be16(skb, IFLA_GRE_IFLAGS,
+                        gre_tnl_flags_to_gre_flags(p->i_flags)) ||
+@@ -1551,6 +1533,34 @@ nla_put_failure:
+       return -EMSGSIZE;
+ }
+ 
++static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
++{
++      struct ip_tunnel *t = netdev_priv(dev);
++
++      if (t->erspan_ver <= 2) {
++              if (t->erspan_ver != 0 && !t->collect_md)
++                      t->parms.o_flags |= TUNNEL_KEY;
++
++              if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
++                      goto nla_put_failure;
++
++              if (t->erspan_ver == 1) {
++                      if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
++                              goto nla_put_failure;
++              } else if (t->erspan_ver == 2) {
++                      if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
++                              goto nla_put_failure;
++                      if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
++                              goto nla_put_failure;
++              }
++      }
++
++      return ipgre_fill_info(skb, dev);
++
++nla_put_failure:
++      return -EMSGSIZE;
++}
++
+ static void erspan_setup(struct net_device *dev)
+ {
+       struct ip_tunnel *t = netdev_priv(dev);
+@@ -1629,7 +1639,7 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
+       .changelink     = erspan_changelink,
+       .dellink        = ip_tunnel_dellink,
+       .get_size       = ipgre_get_size,
+-      .fill_info      = ipgre_fill_info,
++      .fill_info      = erspan_fill_info,
+       .get_link_net   = ip_tunnel_get_link_net,
+ };
+ 
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index fadad8e83521d..e427f5040a08e 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -919,6 +919,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               if (err < 0)
+                       goto fail;
+ 
++              /* We prevent @rt from being freed. */
++              rcu_read_lock();
++
+               for (;;) {
+                       /* Prepare header of the next frame,
+                        * before previous one went down. */
+@@ -942,6 +945,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               if (err == 0) {
+                       IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
+                                     IPSTATS_MIB_FRAGOKS);
++                      rcu_read_unlock();
+                       return 0;
+               }
+ 
+@@ -949,6 +953,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 
+               IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
+                             IPSTATS_MIB_FRAGFAILS);
++              rcu_read_unlock();
+               return err;
+ 
+ slow_path_clean:
+diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
+index 1cf5ac09edcbc..a08240fe68a74 100644
+--- a/net/mac802154/iface.c
++++ b/net/mac802154/iface.c
+@@ -661,6 +661,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
+       sdata->dev = ndev;
+       sdata->wpan_dev.wpan_phy = local->hw.phy;
+       sdata->local = local;
++      INIT_LIST_HEAD(&sdata->wpan_dev.list);
+ 
+       /* setup type-dependent data */
+       ret = ieee802154_setup_sdata(sdata, type);
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index c402283e7545b..2efdc50f978b0 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -317,8 +317,13 @@ nla_put_failure:
+ }
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
++static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+ {
++      u32 mark = READ_ONCE(ct->mark);
++
++      if (!mark)
++              return 0;
++
+       if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
+               goto nla_put_failure;
+       return 0;
+@@ -532,7 +537,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
+ static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
+ {
+       if (ctnetlink_dump_status(skb, ct) < 0 ||
+-          ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
++          ctnetlink_dump_mark(skb, ct) < 0 ||
+           ctnetlink_dump_secctx(skb, ct) < 0 ||
+           ctnetlink_dump_id(skb, ct) < 0 ||
+           ctnetlink_dump_use(skb, ct) < 0 ||
+@@ -711,7 +716,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
+       struct sk_buff *skb;
+       unsigned int type;
+       unsigned int flags = 0, group;
+-      u32 mark;
+       int err;
+ 
+       if (events & (1 << IPCT_DESTROY)) {
+@@ -812,9 +816,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
+       }
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-      mark = READ_ONCE(ct->mark);
+-      if ((events & (1 << IPCT_MARK) || mark) &&
+-          ctnetlink_dump_mark(skb, mark) < 0)
++      if (events & (1 << IPCT_MARK) &&
++          ctnetlink_dump_mark(skb, ct) < 0)
+               goto nla_put_failure;
+ #endif
+       nlmsg_end(skb, nlh);
+@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
+ {
+       const struct nf_conntrack_zone *zone;
+       struct nlattr *nest_parms;
+-      u32 mark;
+ 
+       zone = nf_ct_zone(ct);
+ 
+@@ -2729,8 +2731,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
+               goto nla_put_failure;
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-      mark = READ_ONCE(ct->mark);
+-      if (mark && ctnetlink_dump_mark(skb, mark) < 0)
++      if (ctnetlink_dump_mark(skb, ct) < 0)
+               goto nla_put_failure;
+ #endif
+       if (ctnetlink_dump_labels(skb, ct) < 0)
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 949da87dbb063..30cf0673d6c19 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+       struct nft_pipapo_match *m = priv->clone;
+       u8 genmask = nft_genmask_next(net);
+       struct nft_pipapo_field *f;
++      const u8 *start_p, *end_p;
+       int i, bsize_max, err = 0;
+ 
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
+@@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
+       }
+ 
+       /* Validate */
++      start_p = start;
++      end_p = end;
+       nft_pipapo_for_each_field(f, i, m) {
+-              const u8 *start_p = start, *end_p = end;
+-
+               if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
+                       return -ENOSPC;
+ 
+diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
+index 33e1170817f0f..f8b20cddd5c96 100644
+--- a/net/nfc/nci/ntf.c
++++ b/net/nfc/nci/ntf.c
+@@ -218,6 +218,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
+               target->sens_res = nfca_poll->sens_res;
+               target->sel_res = nfca_poll->sel_res;
+               target->nfcid1_len = nfca_poll->nfcid1_len;
++              if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1))
++                      return -EPROTO;
+               if (target->nfcid1_len > 0) {
+                       memcpy(target->nfcid1, nfca_poll->nfcid1,
+                              target->nfcid1_len);
+@@ -226,6 +228,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
+               nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
+ 
+               target->sensb_res_len = nfcb_poll->sensb_res_len;
++              if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res))
++                      return -EPROTO;
+               if (target->sensb_res_len > 0) {
+                       memcpy(target->sensb_res, nfcb_poll->sensb_res,
+                              target->sensb_res_len);
+@@ -234,6 +238,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
+               nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
+ 
+               target->sensf_res_len = nfcf_poll->sensf_res_len;
++              if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res))
++                      return -EPROTO;
+               if (target->sensf_res_len > 0) {
+                       memcpy(target->sensf_res, nfcf_poll->sensf_res,
+                              target->sensf_res_len);
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 064fdb8e50e19..c1e56d1f21b38 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -2188,7 +2188,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+       if (tipc_own_addr(l->net) > msg_prevnode(hdr))
+               l->net_plane = msg_net_plane(hdr);
+ 
+-      skb_linearize(skb);
++      if (skb_linearize(skb))
++              goto exit;
++
+       hdr = buf_msg(skb);
+       data = msg_data(hdr);
+ 
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 60059827563ae..7589f2ac6fd04 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1660,6 +1660,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+       struct tipc_node *n;
+       struct sk_buff_head xmitq;
+       bool node_up = false;
++      struct net *peer_net;
+       int bearer_id;
+       int rc;
+ 
+@@ -1676,18 +1677,23 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+               return -EHOSTUNREACH;
+       }
+ 
++      rcu_read_lock();
+       tipc_node_read_lock(n);
+       node_up = node_is_up(n);
+-      if (node_up && n->peer_net && check_net(n->peer_net)) {
++      peer_net = n->peer_net;
++      tipc_node_read_unlock(n);
++      if (node_up && peer_net && check_net(peer_net)) {
+               /* xmit inner linux container */
+-              tipc_lxc_xmit(n->peer_net, list);
++              tipc_lxc_xmit(peer_net, list);
+               if (likely(skb_queue_empty(list))) {
+-                      tipc_node_read_unlock(n);
++                      rcu_read_unlock();
+                       tipc_node_put(n);
+                       return 0;
+               }
+       }
++      rcu_read_unlock();
+ 
++      tipc_node_read_lock(n);
+       bearer_id = n->active_links[selector & 1];
+       if (unlikely(bearer_id == INVALID_BEARER_ID)) {
+               tipc_node_read_unlock(n);
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 9ff64f9df1f3b..951b33fa8f5cf 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -113,14 +113,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
+       return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
+ }
+ 
+-static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
++static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
++                          struct user_namespace *user_ns)
+ {
+-      uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
++      uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+       return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
+ }
+ 
+ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+-              u32 portid, u32 seq, u32 flags, int sk_ino)
++                      struct user_namespace *user_ns,
++                      u32 portid, u32 seq, u32 flags, int sk_ino)
+ {
+       struct nlmsghdr *nlh;
+       struct unix_diag_msg *rep;
+@@ -166,7 +168,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+               goto out_nlmsg_trim;
+ 
+       if ((req->udiag_show & UDIAG_SHOW_UID) &&
+-          sk_diag_dump_uid(sk, skb))
++          sk_diag_dump_uid(sk, skb, user_ns))
+               goto out_nlmsg_trim;
+ 
+       nlmsg_end(skb, nlh);
+@@ -178,7 +180,8 @@ out_nlmsg_trim:
+ }
+ 
+ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
+-              u32 portid, u32 seq, u32 flags)
++                      struct user_namespace *user_ns,
++                      u32 portid, u32 seq, u32 flags)
+ {
+       int sk_ino;
+ 
+@@ -189,7 +192,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
+       if (!sk_ino)
+               return 0;
+ 
+-      return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
++      return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
+ }
+ 
+ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -217,7 +220,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+                               goto next;
+                       if (!(req->udiag_states & (1 << sk->sk_state)))
+                               goto next;
+-                      if (sk_diag_dump(sk, skb, req,
++                      if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
+                                        NETLINK_CB(cb->skb).portid,
+                                        cb->nlh->nlmsg_seq,
+                                        NLM_F_MULTI) < 0)
+@@ -285,7 +288,8 @@ again:
+       if (!rep)
+               goto out;
+ 
+-      err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
++      err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
++                         NETLINK_CB(in_skb).portid,
+                          nlh->nlmsg_seq, 0, req->udiag_ino);
+       if (err < 0) {
+               nlmsg_free(rep);
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 65db1a7c77b76..bb76a2dd0a2ff 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -112,15 +112,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
+  * expand the variable length event to linear buffer space.
+  */
+ 
+-static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
++static int seq_copy_in_kernel(void *ptr, void *src, int size)
+ {
++      char **bufptr = ptr;
++
+       memcpy(*bufptr, src, size);
+       *bufptr += size;
+       return 0;
+ }
+ 
+-static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
++static int seq_copy_in_user(void *ptr, void *src, int size)
+ {
++      char __user **bufptr = ptr;
++
+       if (copy_to_user(*bufptr, src, size))
+               return -EFAULT;
+       *bufptr += size;
+@@ -149,8 +153,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
+               return newlen;
+       }
+       err = snd_seq_dump_var_event(event,
+-                                   in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
+-                                   (snd_seq_dump_func_t)seq_copy_in_user,
++                                   in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
+                                    &buf);
+       return err < 0 ? err : newlen;
+ }
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 21574447650cd..57aeded978c28 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2489,6 +2489,14 @@ static void wm8962_configure_bclk(struct snd_soc_component *component)
+               snd_soc_component_update_bits(component, WM8962_CLOCKING2,
+                               WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+ 
++      /* DSPCLK_DIV field in WM8962_CLOCKING1 register is used to generate
++       * correct frequency of LRCLK and BCLK. Sometimes the read-only value
++       * can't be updated timely after enabling SYSCLK. This results in wrong
++       * calculation values. Delay is introduced here to wait for newest
++       * value from register. The time of the delay should be at least
++       * 500~1000us according to test.
++       */
++      usleep_range(500, 1000);
+       dspclk = snd_soc_component_read(component, WM8962_CLOCKING1);
+ 
+       if (snd_soc_component_get_bias_level(component) != SND_SOC_BIAS_ON)
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 0e2261ee07b67..fb874f924bbe3 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1154,6 +1154,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
+               return;
+ 
+       be_substream = snd_soc_dpcm_get_substream(be, stream);
++      if (!be_substream)
++              return;
+ 
+       for_each_dpcm_fe(be, stream, dpcm) {
+               if (dpcm->fe == fe)
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index a7f53c2a9580a..0f3bf90e04d36 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -1622,13 +1622,21 @@ ipv4_del_addr_test()
+ 
+       $IP addr add dev dummy1 172.16.104.1/24
+       $IP addr add dev dummy1 172.16.104.11/24
++      $IP addr add dev dummy1 172.16.104.12/24
++      $IP addr add dev dummy1 172.16.104.13/24
+       $IP addr add dev dummy2 172.16.104.1/24
+       $IP addr add dev dummy2 172.16.104.11/24
++      $IP addr add dev dummy2 172.16.104.12/24
+       $IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
++      $IP route add 172.16.106.0/24 dev lo src 172.16.104.12
++      $IP route add table 0 172.16.107.0/24 via 172.16.104.2 src 172.16.104.13
+       $IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
++      $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12
+       set +e
+ 
+       # removing address from device in vrf should only remove route from vrf table
++      echo "    Regular FIB info"
++
+       $IP addr del dev dummy2 172.16.104.11/24
+       $IP ro ls vrf red | grep -q 172.16.105.0/24
+       log_test $? 1 "Route removed from VRF when source address deleted"
+@@ -1646,6 +1654,35 @@ ipv4_del_addr_test()
+       $IP ro ls vrf red | grep -q 172.16.105.0/24
+       log_test $? 0 "Route in VRF is not removed by address delete"
+ 
++      # removing address from device in vrf should only remove route from vrf
++      # table even when the associated fib info only differs in table ID
++      echo "    Identical FIB info with different table ID"
++
++      $IP addr del dev dummy2 172.16.104.12/24
++      $IP ro ls vrf red | grep -q 172.16.106.0/24
++      log_test $? 1 "Route removed from VRF when source address deleted"
++
++      $IP ro ls | grep -q 172.16.106.0/24
++      log_test $? 0 "Route in default VRF not removed"
++
++      $IP addr add dev dummy2 172.16.104.12/24
++      $IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12
++
++      $IP addr del dev dummy1 172.16.104.12/24
++      $IP ro ls | grep -q 172.16.106.0/24
++      log_test $? 1 "Route removed in default VRF when source address deleted"
++
++      $IP ro ls vrf red | grep -q 172.16.106.0/24
++      log_test $? 0 "Route in VRF is not removed by address delete"
++
++      # removing address from device in default vrf should remove route from
++      # the default vrf even when route was inserted with a table ID of 0.
++      echo "    Table ID 0"
++
++      $IP addr del dev dummy1 172.16.104.13/24
++      $IP ro ls | grep -q 172.16.107.0/24
++      log_test $? 1 "Route removed in default VRF when source address deleted"
++
+       $IP li del dummy1
+       $IP li del dummy2
+       cleanup
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index c9ce3dfa42ee7..c3a905923ef29 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -782,7 +782,7 @@ kci_test_ipsec_offload()
+           tmpl proto esp src $srcip dst $dstip spi 9 \
+           mode transport reqid 42
+       check_err $?
+-      ip x p add dir out src $dstip/24 dst $srcip/24 \
++      ip x p add dir in src $dstip/24 dst $srcip/24 \
+           tmpl proto esp src $dstip dst $srcip spi 9 \
+           mode transport reqid 42
+       check_err $?

Reply via email to