commit:     4e25534a85d84d184b0dc1e699be423fd9d9b85b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec  1 12:50:44 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec  1 12:50:44 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4e25534a

Linux patch 4.19.219

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1218_linux-4.19.219.patch | 3456 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3460 insertions(+)

diff --git a/0000_README b/0000_README
index 2c528d2c..c2e992ef 100644
--- a/0000_README
+++ b/0000_README
@@ -911,6 +911,10 @@ Patch:  1217_linux-4.19.218.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.218
 
+Patch:  1218_linux-4.19.219.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.219
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1218_linux-4.19.219.patch b/1218_linux-4.19.219.patch
new file mode 100644
index 00000000..a394c820
--- /dev/null
+++ b/1218_linux-4.19.219.patch
@@ -0,0 +1,3456 @@
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+index c7c088d2dd503..fb8ec9b0f8c70 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+@@ -43,26 +43,26 @@ group emmc_nb
+ 
+ group pwm0
+  - pin 11 (GPIO1-11)
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+ 
+ group pwm1
+  - pin 12
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+ 
+ group pwm2
+  - pin 13
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+ 
+ group pwm3
+  - pin 14
+- - functions pwm, gpio
++ - functions pwm, led, gpio
+ 
+ group pmic1
+- - pin 17
++ - pin 7
+  - functions pmic, gpio
+ 
+ group pmic0
+- - pin 16
++ - pin 6
+  - functions pmic, gpio
+ 
+ group i2c2
+@@ -112,17 +112,25 @@ group usb2_drvvbus1
+  - functions drvbus, gpio
+ 
+ group sdio_sb
+- - pins 60-64
++ - pins 60-65
+  - functions sdio, gpio
+ 
+ group rgmii
+- - pins 42-55
++ - pins 42-53
+  - functions mii, gpio
+ 
+ group pcie1
+- - pins 39-40
++ - pins 39
++ - functions pcie, gpio
++
++group pcie1_clkreq
++ - pins 40
+  - functions pcie, gpio
+ 
++group smi
++ - pins 54-55
++ - functions smi, gpio
++
+ group ptp
+  - pins 56-58
+  - functions ptp, gpio
+diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
+index 056898685d408..fc531c29a2e83 100644
+--- a/Documentation/networking/ipvs-sysctl.txt
++++ b/Documentation/networking/ipvs-sysctl.txt
+@@ -30,8 +30,7 @@ conn_reuse_mode - INTEGER
+ 
+       0: disable any special handling on port reuse. The new
+       connection will be delivered to the same real server that was
+-      servicing the previous connection. This will effectively
+-      disable expire_nodest_conn.
++      servicing the previous connection.
+ 
+       bit 1: enable rescheduling of new connections when it is safe.
+       That is, whenever expire_nodest_conn and for TCP sockets, when
+diff --git a/Makefile b/Makefile
+index 455ba411998f9..310cc8508b9e8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 218
++SUBLEVEL = 219
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index fa3422c4caec1..6edc4bd1e7eaf 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -239,6 +239,8 @@
+ 
+                       gpio-controller;
+                       #gpio-cells = <2>;
++                      interrupt-controller;
++                      #interrupt-cells = <2>;
+               };
+ 
+               pcie0: pcie@12000 {
+@@ -384,7 +386,7 @@
+       i2c0: i2c@18009000 {
+               compatible = "brcm,iproc-i2c";
+               reg = <0x18009000 0x50>;
+-              interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
++              interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               clock-frequency = <100000>;
+diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
+index f854148c8d7c2..00baa13c158d7 100644
+--- a/arch/arm/include/asm/tlb.h
++++ b/arch/arm/include/asm/tlb.h
+@@ -280,6 +280,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
+       tlb_add_flush(tlb, addr);
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++                  unsigned long size)
++{
++      tlb_add_flush(tlb, address);
++      tlb_add_flush(tlb, address + size - PMD_SIZE);
++}
++
+ #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
+ #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
+ #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
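The tlb_flush_pmd_range() helper added above for arm (and below for ia64, s390, sh and um) implements one shared idea: widen the range recorded in the mmu_gather so the eventual TLB flush covers a full PMD-sized span, not just the individual pages queued so far; this appears to support the hugetlb page-table-unsharing flush fix elsewhere in this release. A minimal sketch of the common pattern, using hypothetical names since each architecture tracks its flush range differently:

    /* Sketch of the range-widening pattern the per-arch helpers share.
     * 'start'/'end' stand in for whatever range fields the arch tracks. */
    struct mmu_gather_sketch {
            unsigned long start;    /* lowest address queued for flushing  */
            unsigned long end;      /* one past the highest queued address */
    };

    static inline void
    tlb_flush_pmd_range_sketch(struct mmu_gather_sketch *tlb,
                               unsigned long address, unsigned long size)
    {
            if (tlb->start > address)       /* grow the range downwards */
                    tlb->start = address;
            if (tlb->end < address + size)  /* grow the range upwards */
                    tlb->end = address + size;
    }

Callers pass a PMD-aligned address and PMD_SIZE, so the flush is guaranteed to reach the whole region backed by the freed or unshared page table.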
+diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
+index 65e1817d8afe6..692a287a8712d 100644
+--- a/arch/arm/mach-socfpga/core.h
++++ b/arch/arm/mach-socfpga/core.h
+@@ -48,7 +48,7 @@ extern void __iomem *sdr_ctl_base_addr;
+ u32 socfpga_sdram_self_refresh(u32 sdr_base);
+ extern unsigned int socfpga_sdram_self_refresh_sz;
+ 
+-extern char secondary_trampoline, secondary_trampoline_end;
++extern char secondary_trampoline[], secondary_trampoline_end[];
+ 
+ extern unsigned long socfpga_cpu1start_addr;
+ 
+diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
+index 0ee76772b5074..a272999ce04b9 100644
+--- a/arch/arm/mach-socfpga/platsmp.c
++++ b/arch/arm/mach-socfpga/platsmp.c
+@@ -31,14 +31,14 @@
+ 
+ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+-      int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
++      int trampoline_size = secondary_trampoline_end - secondary_trampoline;
+ 
+       if (socfpga_cpu1start_addr) {
+               /* This will put CPU #1 into reset. */
+               writel(RSTMGR_MPUMODRST_CPU1,
+                      rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
+ 
+-              memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
++              memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
+ 
+               writel(__pa_symbol(secondary_startup),
+                      sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
+@@ -56,12 +56,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ 
+ static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+-      int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
++      int trampoline_size = secondary_trampoline_end - secondary_trampoline;
+ 
+       if (socfpga_cpu1start_addr) {
+               writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
+                      SOCFPGA_A10_RSTMGR_MODMPURST);
+-              memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
++              memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
+ 
+               writel(__pa_symbol(secondary_startup),
+                      sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
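The socfpga hunks above turn the trampoline markers from plain chars into incomplete arrays. With extern char secondary_trampoline;, the compiler sees a one-byte object, so memcpy()ing trampoline_size bytes from its address can trip fortified routines under CONFIG_FORTIFY_SOURCE (the likely motivation here); the char sym[] form has unknown size and also drops the address-of operators from the pointer arithmetic. A standalone illustration, where _tramp_start/_tramp_end are hypothetical stand-ins for linker-script labels:

    /* Hypothetical linker-script symbols delimiting a code blob. The
     * incomplete-array declaration has unknown size, so fortified memory
     * routines and pointer arithmetic on it behave as intended. */
    extern char _tramp_start[], _tramp_end[];

    static long tramp_size(void)
    {
            return _tramp_end - _tramp_start; /* arrays decay; no '&' needed */
    }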
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+index f2cc00594d64a..3e5789f372069 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+@@ -128,6 +128,9 @@
+ 
+ /* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
+ &pcie0 {
++      pinctrl-names = "default";
++      pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
++      reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
+       status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+index 1a3e6e3b04eba..f360891982434 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+@@ -55,6 +55,9 @@
+ 
+ /* J9 */
+ &pcie0 {
++      pinctrl-names = "default";
++      pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
++      reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
+       status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index fca78eb334b19..7500be1a11a3c 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -254,6 +254,15 @@
+                                       function = "mii";
+                               };
+ 
++                              pcie_reset_pins: pcie-reset-pins {
++                                      groups = "pcie1";
++                                      function = "gpio";
++                              };
++
++                              pcie_clkreq_pins: pcie-clkreq-pins {
++                                      groups = "pcie1_clkreq";
++                                      function = "pcie";
++                              };
+                       };
+ 
+                       eth0: ethernet@30000 {
+diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
+index 516355a774bfe..5d032d97c254e 100644
+--- a/arch/ia64/include/asm/tlb.h
++++ b/arch/ia64/include/asm/tlb.h
+@@ -268,6 +268,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre
+       tlb->end_addr = address + PAGE_SIZE;
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++                  unsigned long size)
++{
++      if (tlb->start_addr > address)
++              tlb->start_addr = address;
++      if (tlb->end_addr < address + size)
++              tlb->end_addr = address + size;
++}
++
+ #define tlb_migrate_finish(mm)        platform_tlb_migrate_finish(mm)
+ 
+ #define tlb_start_vma(tlb, vma)                       do { } while (0)
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index e513528be3ad7..8a227a80f6bd5 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2991,7 +2991,7 @@ config HAVE_LATENCYTOP_SUPPORT
+ config PGTABLE_LEVELS
+       int
+       default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
+-      default 3 if 64BIT && !PAGE_SIZE_64KB
++      default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
+       default 2
+ 
+ config MIPS_AUTO_PFN_OFFSET
+diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
+index b31c779cf5817..1df28a8e2f19e 100644
+--- a/arch/s390/include/asm/tlb.h
++++ b/arch/s390/include/asm/tlb.h
+@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+       return tlb_remove_page(tlb, page);
+ }
+ 
++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
++                              unsigned long address, unsigned long size)
++{
++      /*
++       * the range might exceed the original range that was provided to
++       * tlb_gather_mmu(), so we need to update it despite the fact it is
++       * usually not updated.
++       */
++      if (tlb->start > address)
++              tlb->start = address;
++      if (tlb->end < address + size)
++              tlb->end = address + size;
++}
++
+ /*
+  * pte_free_tlb frees a pte table and clears the CRSTE for the
+  * page table from the tlb.
+@@ -177,6 +191,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ #define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
+ #define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)     do { } while (0)
+ #define tlb_migrate_finish(mm)                        do { } while (0)
++#define tlb_flush_pmd_range(tlb, addr, sz)    do { } while (0)
++
+ #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)      \
+       tlb_remove_tlb_entry(tlb, ptep, address)
+ 
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index ae894ac83fd61..4354ac6077503 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -970,6 +970,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
+ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
+                       unsigned long *oldpte, unsigned long *oldpgste)
+ {
++      struct vm_area_struct *vma;
+       unsigned long pgstev;
+       spinlock_t *ptl;
+       pgste_t pgste;
+@@ -979,6 +980,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
+       WARN_ON_ONCE(orc > ESSA_MAX);
+       if (unlikely(orc > ESSA_MAX))
+               return -EINVAL;
++
++      vma = find_vma(mm, hva);
++      if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
++              return -EFAULT;
+       ptep = get_locked_pte(mm, hva, &ptl);
+       if (unlikely(!ptep))
+               return -EFAULT;
+@@ -1071,10 +1076,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
+ int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
+                       unsigned long bits, unsigned long value)
+ {
++      struct vm_area_struct *vma;
+       spinlock_t *ptl;
+       pgste_t new;
+       pte_t *ptep;
+ 
++      vma = find_vma(mm, hva);
++      if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
++              return -EFAULT;
+       ptep = get_locked_pte(mm, hva, &ptl);
+       if (unlikely(!ptep))
+               return -EFAULT;
+@@ -1099,9 +1108,13 @@ EXPORT_SYMBOL(set_pgste_bits);
+  */
+ int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
+ {
++      struct vm_area_struct *vma;
+       spinlock_t *ptl;
+       pte_t *ptep;
+ 
++      vma = find_vma(mm, hva);
++      if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
++              return -EFAULT;
+       ptep = get_locked_pte(mm, hva, &ptl);
+       if (unlikely(!ptep))
+               return -EFAULT;
+diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
+index 77abe192fb43d..adcb0bfe238e3 100644
+--- a/arch/sh/include/asm/tlb.h
++++ b/arch/sh/include/asm/tlb.h
+@@ -127,6 +127,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+       return tlb_remove_page(tlb, page);
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++                  unsigned long size)
++{
++      if (tlb->start > address)
++              tlb->start = address;
++      if (tlb->end < address + size)
++              tlb->end = address + size;
++}
++
+ #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+                                                    unsigned int page_size)
+diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
+index dce6db147f245..02e61f6abfcab 100644
+--- a/arch/um/include/asm/tlb.h
++++ b/arch/um/include/asm/tlb.h
+@@ -130,6 +130,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+       return tlb_remove_page(tlb, page);
+ }
+ 
++static inline void
++tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++                  unsigned long size)
++{
++      tlb->need_flush = 1;
++
++      if (tlb->start > address)
++              tlb->start = address;
++      if (tlb->end < address + size)
++              tlb->end = address + size;
++}
++
+ /**
+  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+  *
+diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
+index 7111280c88422..2d3e0cca9ba0f 100644
+--- a/arch/xtensa/include/asm/vectors.h
++++ b/arch/xtensa/include/asm/vectors.h
+@@ -31,7 +31,7 @@
+ #endif
+ #define XCHAL_KIO_SIZE                        0x10000000
+ 
+-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
++#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
+ #define XCHAL_KIO_PADDR                       xtensa_get_kio_paddr()
+ #ifndef __ASSEMBLY__
+ extern unsigned long xtensa_kio_paddr;
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 6a0167ac803c6..901990b8296c1 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -65,7 +65,7 @@ int initrd_is_mapped = 0;
+ extern int initrd_below_start_ok;
+ #endif
+ 
+-#ifdef CONFIG_OF
++#ifdef CONFIG_USE_OF
+ void *dtb_start = __dtb_start;
+ #endif
+ 
+@@ -127,7 +127,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
+ 
+ #endif /* CONFIG_BLK_DEV_INITRD */
+ 
+-#ifdef CONFIG_OF
++#ifdef CONFIG_USE_OF
+ 
+ static int __init parse_tag_fdt(const bp_tag_t *tag)
+ {
+@@ -137,7 +137,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
+ 
+ __tagtable(BP_TAG_FDT, parse_tag_fdt);
+ 
+-#endif /* CONFIG_OF */
++#endif /* CONFIG_USE_OF */
+ 
+ static int __init parse_tag_cmdline(const bp_tag_t* tag)
+ {
+@@ -185,7 +185,7 @@ static int __init parse_bootparam(const bp_tag_t *tag)
+ }
+ #endif
+ 
+-#ifdef CONFIG_OF
++#ifdef CONFIG_USE_OF
+ 
+ #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
+ unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
+@@ -234,7 +234,7 @@ void __init early_init_devtree(void *params)
+               strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ }
+ 
+-#endif /* CONFIG_OF */
++#endif /* CONFIG_USE_OF */
+ 
+ /*
+  * Initialize architecture. (Early stage)
+@@ -255,7 +255,7 @@ void __init init_arch(bp_tag_t *bp_start)
+       if (bp_start)
+               parse_bootparam(bp_start);
+ 
+-#ifdef CONFIG_OF
++#ifdef CONFIG_USE_OF
+       early_init_devtree(dtb_start);
+ #endif
+ 
+diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
+index 9d1ecfc536708..470843188f2fc 100644
+--- a/arch/xtensa/mm/mmu.c
++++ b/arch/xtensa/mm/mmu.c
+@@ -98,7 +98,7 @@ void init_mmu(void)
+ 
+ void init_kio(void)
+ {
+-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
++#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
+       /*
+        * Update the IO area mapping in case xtensa_kio_paddr has changed
+        */
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 83be89c8627b9..9229c5c9ad473 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2966,7 +2966,7 @@ static void binder_transaction(struct binder_proc *proc,
+               t->from = thread;
+       else
+               t->from = NULL;
+-      t->sender_euid = proc->cred->euid;
++      t->sender_euid = task_euid(proc->tsk);
+       t->to_proc = target_proc;
+       t->to_thread = target_thread;
+       t->code = tr->code;
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 1b06c8e46ffa4..bd756b294d307 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -80,6 +80,7 @@ enum blkif_state {
+       BLKIF_STATE_DISCONNECTED,
+       BLKIF_STATE_CONNECTED,
+       BLKIF_STATE_SUSPENDED,
++      BLKIF_STATE_ERROR,
+ };
+ 
+ struct grant {
+@@ -89,6 +90,7 @@ struct grant {
+ };
+ 
+ enum blk_req_status {
++      REQ_PROCESSING,
+       REQ_WAITING,
+       REQ_DONE,
+       REQ_ERROR,
+@@ -533,10 +535,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
+ 
+       id = get_id_from_freelist(rinfo);
+       rinfo->shadow[id].request = req;
+-      rinfo->shadow[id].status = REQ_WAITING;
++      rinfo->shadow[id].status = REQ_PROCESSING;
+       rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
+ 
+-      (*ring_req)->u.rw.id = id;
++      rinfo->shadow[id].req.u.rw.id = id;
+ 
+       return id;
+ }
+@@ -544,11 +546,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
+ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
+ {
+       struct blkfront_info *info = rinfo->dev_info;
+-      struct blkif_request *ring_req;
++      struct blkif_request *ring_req, *final_ring_req;
+       unsigned long id;
+ 
+       /* Fill out a communications ring structure. */
+-      id = blkif_ring_get_request(rinfo, req, &ring_req);
++      id = blkif_ring_get_request(rinfo, req, &final_ring_req);
++      ring_req = &rinfo->shadow[id].req;
+ 
+       ring_req->operation = BLKIF_OP_DISCARD;
+       ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+@@ -559,8 +562,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
+       else
+               ring_req->u.discard.flag = 0;
+ 
+-      /* Keep a private copy so we can reissue requests when recovering. */
+-      rinfo->shadow[id].req = *ring_req;
++      /* Copy the request to the ring page. */
++      *final_ring_req = *ring_req;
++      rinfo->shadow[id].status = REQ_WAITING;
+ 
+       return 0;
+ }
+@@ -693,6 +697,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
+ {
+       struct blkfront_info *info = rinfo->dev_info;
+       struct blkif_request *ring_req, *extra_ring_req = NULL;
++      struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
+       unsigned long id, extra_id = NO_ASSOCIATED_ID;
+       bool require_extra_req = false;
+       int i;
+@@ -737,7 +742,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
+       }
+ 
+       /* Fill out a communications ring structure. */
+-      id = blkif_ring_get_request(rinfo, req, &ring_req);
++      id = blkif_ring_get_request(rinfo, req, &final_ring_req);
++      ring_req = &rinfo->shadow[id].req;
+ 
+       num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+       num_grant = 0;
+@@ -788,7 +794,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
+               ring_req->u.rw.nr_segments = num_grant;
+               if (unlikely(require_extra_req)) {
+                       extra_id = blkif_ring_get_request(rinfo, req,
+-                                                        &extra_ring_req);
++                                                        &final_extra_ring_req);
++                      extra_ring_req = &rinfo->shadow[extra_id].req;
++
+                       /*
+                        * Only the first request contains the scatter-gather
+                        * list.
+@@ -830,10 +838,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
+       if (setup.segments)
+               kunmap_atomic(setup.segments);
+ 
+-      /* Keep a private copy so we can reissue requests when recovering. */
+-      rinfo->shadow[id].req = *ring_req;
+-      if (unlikely(require_extra_req))
+-              rinfo->shadow[extra_id].req = *extra_ring_req;
++      /* Copy request(s) to the ring page. */
++      *final_ring_req = *ring_req;
++      rinfo->shadow[id].status = REQ_WAITING;
++      if (unlikely(require_extra_req)) {
++              *final_extra_ring_req = *extra_ring_req;
++              rinfo->shadow[extra_id].status = REQ_WAITING;
++      }
+ 
+       if (new_persistent_gnts)
+               gnttab_free_grant_references(setup.gref_head);
+@@ -1407,8 +1418,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
+ static int blkif_get_final_status(enum blk_req_status s1,
+                                 enum blk_req_status s2)
+ {
+-      BUG_ON(s1 == REQ_WAITING);
+-      BUG_ON(s2 == REQ_WAITING);
++      BUG_ON(s1 < REQ_DONE);
++      BUG_ON(s2 < REQ_DONE);
+ 
+       if (s1 == REQ_ERROR || s2 == REQ_ERROR)
+               return BLKIF_RSP_ERROR;
+@@ -1441,7 +1452,7 @@ static bool blkif_completion(unsigned long *id,
+               s->status = blkif_rsp_to_req_status(bret->status);
+ 
+               /* Wait the second response if not yet here. */
+-              if (s2->status == REQ_WAITING)
++              if (s2->status < REQ_DONE)
+                       return false;
+ 
+               bret->status = blkif_get_final_status(s->status,
+@@ -1549,7 +1560,7 @@ static bool blkif_completion(unsigned long *id,
+ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+ {
+       struct request *req;
+-      struct blkif_response *bret;
++      struct blkif_response bret;
+       RING_IDX i, rp;
+       unsigned long flags;
+       struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
+@@ -1560,54 +1571,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+ 
+       spin_lock_irqsave(&rinfo->ring_lock, flags);
+  again:
+-      rp = rinfo->ring.sring->rsp_prod;
+-      rmb(); /* Ensure we see queued responses up to 'rp'. */
++      rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
++      virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
++      if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
++              pr_alert("%s: illegal number of responses %u\n",
++                       info->gd->disk_name, rp - rinfo->ring.rsp_cons);
++              goto err;
++      }
+ 
+       for (i = rinfo->ring.rsp_cons; i != rp; i++) {
+               unsigned long id;
++              unsigned int op;
++
++              RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
++              id = bret.id;
+ 
+-              bret = RING_GET_RESPONSE(&rinfo->ring, i);
+-              id   = bret->id;
+               /*
+                * The backend has messed up and given us an id that we would
+                * never have given to it (we stamp it up to BLK_RING_SIZE -
+                * look in get_id_from_freelist.
+                */
+               if (id >= BLK_RING_SIZE(info)) {
+-                      WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+-                           info->gd->disk_name, op_name(bret->operation), id);
+-                      /* We can't safely get the 'struct request' as
+-                       * the id is busted. */
+-                      continue;
++                      pr_alert("%s: response has incorrect id (%ld)\n",
++                               info->gd->disk_name, id);
++                      goto err;
+               }
++              if (rinfo->shadow[id].status != REQ_WAITING) {
++                      pr_alert("%s: response references no pending request\n",
++                               info->gd->disk_name);
++                      goto err;
++              }
++
++              rinfo->shadow[id].status = REQ_PROCESSING;
+               req  = rinfo->shadow[id].request;
+ 
+-              if (bret->operation != BLKIF_OP_DISCARD) {
++              op = rinfo->shadow[id].req.operation;
++              if (op == BLKIF_OP_INDIRECT)
++                      op = rinfo->shadow[id].req.u.indirect.indirect_op;
++              if (bret.operation != op) {
++                      pr_alert("%s: response has wrong operation (%u instead of %u)\n",
++                               info->gd->disk_name, bret.operation, op);
++                      goto err;
++              }
++
++              if (bret.operation != BLKIF_OP_DISCARD) {
+                       /*
+                        * We may need to wait for an extra response if the
+                        * I/O request is split in 2
+                        */
+-                      if (!blkif_completion(&id, rinfo, bret))
++                      if (!blkif_completion(&id, rinfo, &bret))
+                               continue;
+               }
+ 
+               if (add_id_to_freelist(rinfo, id)) {
+                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+-                           info->gd->disk_name, op_name(bret->operation), id);
++                           info->gd->disk_name, op_name(bret.operation), id);
+                       continue;
+               }
+ 
+-              if (bret->status == BLKIF_RSP_OKAY)
++              if (bret.status == BLKIF_RSP_OKAY)
+                       blkif_req(req)->error = BLK_STS_OK;
+               else
+                       blkif_req(req)->error = BLK_STS_IOERR;
+ 
+-              switch (bret->operation) {
++              switch (bret.operation) {
+               case BLKIF_OP_DISCARD:
+-                      if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
++                      if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
+                               struct request_queue *rq = info->rq;
+-                              printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+-                                         info->gd->disk_name, op_name(bret->operation));
++
++                              pr_warn_ratelimited("blkfront: %s: %s op failed\n",
++                                         info->gd->disk_name, op_name(bret.operation));
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
+                               info->feature_discard = 0;
+                               info->feature_secdiscard = 0;
+@@ -1617,15 +1650,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+                       break;
+               case BLKIF_OP_FLUSH_DISKCACHE:
+               case BLKIF_OP_WRITE_BARRIER:
+-                      if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+-                              printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+-                                     info->gd->disk_name, op_name(bret->operation));
++                      if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
++                              pr_warn_ratelimited("blkfront: %s: %s op failed\n",
++                                     info->gd->disk_name, op_name(bret.operation));
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
+                       }
+-                      if (unlikely(bret->status == BLKIF_RSP_ERROR &&
++                      if (unlikely(bret.status == BLKIF_RSP_ERROR &&
+                                    rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
+-                              printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+-                                     info->gd->disk_name, op_name(bret->operation));
++                              pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
++                                     info->gd->disk_name, op_name(bret.operation));
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
+                       }
+                       if (unlikely(blkif_req(req)->error)) {
+@@ -1638,9 +1671,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+                       /* fall through */
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+-                      if (unlikely(bret->status != BLKIF_RSP_OKAY))
+-                              dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
+-                                      "request: %x\n", bret->status);
++                      if (unlikely(bret.status != BLKIF_RSP_OKAY))
++                              dev_dbg_ratelimited(&info->xbdev->dev,
++                                      "Bad return from blkdev data request: %#x\n",
++                                      bret.status);
+ 
+                       break;
+               default:
+@@ -1665,6 +1699,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+       spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ 
+       return IRQ_HANDLED;
++
++ err:
++      info->connected = BLKIF_STATE_ERROR;
++
++      spin_unlock_irqrestore(&rinfo->ring_lock, flags);
++
++      pr_alert("%s disabled for further use\n", info->gd->disk_name);
++      return IRQ_HANDLED;
+ }
+ 
+ 
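One hardening pattern runs through the whole xen-blkfront hunk: memory shared with the backend is untrusted. Requests are assembled in the frontend-private shadow and only then copied onto the ring page (*final_ring_req = *ring_req;), and each response is snapshotted off the ring (RING_COPY_RESPONSE) and validated on the local copy (id in range, slot actually in REQ_WAITING state, operation matching the request) before anything acts on it. A minimal sketch of the response side under those assumptions (the names below are illustrative, not the driver's):

    #include <string.h>

    struct resp { unsigned int id; int status; };

    /* Consume one response from a slot shared with an untrusted peer:
     * snapshot it once, validate the copy, never re-read the shared slot. */
    static int consume_response(const struct resp *shared_slot,
                                unsigned int ring_size,
                                unsigned char *pending /* per-id outstanding flags */)
    {
            struct resp local;

            memcpy(&local, shared_slot, sizeof(local)); /* single snapshot */
            if (local.id >= ring_size || !pending[local.id])
                    return -1;       /* bogus id or unsolicited response */
            pending[local.id] = 0;   /* retire the request exactly once */
            return local.status;     /* act only on the private copy */
    }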
+diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
+index 041f8152272bf..177874adccf0d 100644
+--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
+@@ -106,9 +106,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
+       scmi_pd_data->domains = domains;
+       scmi_pd_data->num_domains = num_domains;
+ 
+-      of_genpd_add_provider_onecell(np, scmi_pd_data);
+-
+-      return 0;
++      return of_genpd_add_provider_onecell(np, scmi_pd_data);
+ }
+ 
+ static const struct scmi_device_id scmi_id_table[] = {
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 8dcce7182bb7c..1e28ff9815997 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -417,7 +417,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
+ 
+       bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+       if (!bo)
+-              return ERR_PTR(-ENOMEM);
++              return NULL;
+ 
+       bo->madv = VC4_MADV_WILLNEED;
+       refcount_set(&bo->usecnt, 0);
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 10524c93f8b62..f22f59df02601 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2538,6 +2538,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+       struct wacom_features *features = &wacom->wacom_wac.features;
+ 
+       switch (equivalent_usage) {
++      case HID_DG_CONFIDENCE:
++              wacom_wac->hid_data.confidence = value;
++              break;
+       case HID_GD_X:
+               wacom_wac->hid_data.x = value;
+               break;
+@@ -2568,7 +2571,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ 
+ 
+       if (usage->usage_index + 1 == field->report_count) {
+-              if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
++              if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
++                  wacom_wac->hid_data.confidence)
+                       wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+       }
+ }
+@@ -2581,6 +2585,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
+       struct hid_data* hid_data = &wacom_wac->hid_data;
+       int i;
+ 
++      hid_data->confidence = true;
++
+       for (i = 0; i < report->maxfield; i++) {
+               struct hid_field *field = report->field[i];
+               int j;
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 0abed1e5b5260..48ce2b0a4549e 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -302,6 +302,7 @@ struct hid_data {
+       bool tipswitch;
+       bool barrelswitch;
+       bool barrelswitch2;
++      bool confidence;
+       int x;
+       int y;
+       int pressure;
+diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
+index 60b20ae02b055..5eeadab15a5f5 100644
+--- a/drivers/media/cec/cec-adap.c
++++ b/drivers/media/cec/cec-adap.c
+@@ -1146,6 +1146,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
+                       if (abort)
+                               dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
+                       msg->flags = dst->flags;
++                      msg->sequence = dst->sequence;
+                       /* Remove it from the wait_queue */
+                       list_del_init(&data->list);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index fd5375b5991bb..a257bf635bc24 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -451,9 +451,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev,  u16 rss_size)
+       roundup_size = ilog2(roundup_size);
+ 
+       for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+-              tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
++              tc_valid[i] = 1;
+               tc_size[i] = roundup_size;
+-              tc_offset[i] = rss_size * i;
++              tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
+       }
+ 
+       hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index a1c828ffac8b7..434a009c52d90 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -121,21 +121,17 @@ struct netfront_queue {
+ 
+       /*
+        * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
+-       * are linked from tx_skb_freelist through skb_entry.link.
+-       *
+-       *  NB. Freelist index entries are always going to be less than
+-       *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
+-       *  greater than PAGE_OFFSET: we use this property to distinguish
+-       *  them.
++       * are linked from tx_skb_freelist through tx_link.
+        */
+-      union skb_entry {
+-              struct sk_buff *skb;
+-              unsigned long link;
+-      } tx_skbs[NET_TX_RING_SIZE];
++      struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
++      unsigned short tx_link[NET_TX_RING_SIZE];
++#define TX_LINK_NONE 0xffff
++#define TX_PENDING   0xfffe
+       grant_ref_t gref_tx_head;
+       grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+       struct page *grant_tx_page[NET_TX_RING_SIZE];
+       unsigned tx_skb_freelist;
++      unsigned int tx_pend_queue;
+ 
+       spinlock_t   rx_lock ____cacheline_aligned_in_smp;
+       struct xen_netif_rx_front_ring rx;
+@@ -161,6 +157,9 @@ struct netfront_info {
+       struct netfront_stats __percpu *rx_stats;
+       struct netfront_stats __percpu *tx_stats;
+ 
++      /* Is device behaving sane? */
++      bool broken;
++
+       atomic_t rx_gso_checksum_fixup;
+ };
+ 
+@@ -169,33 +168,25 @@ struct netfront_rx_info {
+       struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+ };
+ 
+-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
+-{
+-      list->link = id;
+-}
+-
+-static int skb_entry_is_link(const union skb_entry *list)
+-{
+-      BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
+-      return (unsigned long)list->skb < PAGE_OFFSET;
+-}
+-
+ /*
+  * Access macros for acquiring freeing slots in tx_skbs[].
+  */
+ 
+-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
+-                             unsigned short id)
++static void add_id_to_list(unsigned *head, unsigned short *list,
++                         unsigned short id)
+ {
+-      skb_entry_set_link(&list[id], *head);
++      list[id] = *head;
+       *head = id;
+ }
+ 
+-static unsigned short get_id_from_freelist(unsigned *head,
+-                                         union skb_entry *list)
++static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
+ {
+       unsigned int id = *head;
+-      *head = list[id].link;
++
++      if (id != TX_LINK_NONE) {
++              *head = list[id];
++              list[id] = TX_LINK_NONE;
++      }
+       return id;
+ }
+ 
+@@ -353,7 +344,7 @@ static int xennet_open(struct net_device *dev)
+       unsigned int i = 0;
+       struct netfront_queue *queue = NULL;
+ 
+-      if (!np->queues)
++      if (!np->queues || np->broken)
+               return -ENODEV;
+ 
+       for (i = 0; i < num_queues; ++i) {
+@@ -381,27 +372,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
+       unsigned short id;
+       struct sk_buff *skb;
+       bool more_to_do;
++      const struct device *dev = &queue->info->netdev->dev;
+ 
+       BUG_ON(!netif_carrier_ok(queue->info->netdev));
+ 
+       do {
+               prod = queue->tx.sring->rsp_prod;
++              if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
++                      dev_alert(dev, "Illegal number of responses %u\n",
++                                prod - queue->tx.rsp_cons);
++                      goto err;
++              }
+               rmb(); /* Ensure we see responses up to 'rp'. */
+ 
+               for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
+-                      struct xen_netif_tx_response *txrsp;
++                      struct xen_netif_tx_response txrsp;
+ 
+-                      txrsp = RING_GET_RESPONSE(&queue->tx, cons);
+-                      if (txrsp->status == XEN_NETIF_RSP_NULL)
++                      RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
++                      if (txrsp.status == XEN_NETIF_RSP_NULL)
+                               continue;
+ 
+-                      id  = txrsp->id;
+-                      skb = queue->tx_skbs[id].skb;
++                      id = txrsp.id;
++                      if (id >= RING_SIZE(&queue->tx)) {
++                              dev_alert(dev,
++                                        "Response has incorrect id (%u)\n",
++                                        id);
++                              goto err;
++                      }
++                      if (queue->tx_link[id] != TX_PENDING) {
++                              dev_alert(dev,
++                                        "Response for inactive request\n");
++                              goto err;
++                      }
++
++                      queue->tx_link[id] = TX_LINK_NONE;
++                      skb = queue->tx_skbs[id];
++                      queue->tx_skbs[id] = NULL;
+                       if (unlikely(gnttab_query_foreign_access(
+                               queue->grant_tx_ref[id]) != 0)) {
+-                              pr_alert("%s: warning -- grant still in use by backend domain\n",
+-                                       __func__);
+-                              BUG();
++                              dev_alert(dev,
++                                        "Grant still in use by backend domain\n");
++                              goto err;
+                       }
+                       gnttab_end_foreign_access_ref(
+                               queue->grant_tx_ref[id], GNTMAP_readonly);
+@@ -409,7 +420,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
+                               &queue->gref_tx_head, queue->grant_tx_ref[id]);
+                       queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+                       queue->grant_tx_page[id] = NULL;
+-                      add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
++                      add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
+                       dev_kfree_skb_irq(skb);
+               }
+ 
+@@ -419,13 +430,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
+       } while (more_to_do);
+ 
+       xennet_maybe_wake_tx(queue);
++
++      return;
++
++ err:
++      queue->info->broken = true;
++      dev_alert(dev, "Disabled for further use\n");
+ }
+ 
+ struct xennet_gnttab_make_txreq {
+       struct netfront_queue *queue;
+       struct sk_buff *skb;
+       struct page *page;
+-      struct xen_netif_tx_request *tx; /* Last request */
++      struct xen_netif_tx_request *tx;      /* Last request on ring page */
++      struct xen_netif_tx_request tx_local; /* Last request local copy*/
+       unsigned int size;
+ };
+ 
+@@ -441,7 +459,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
+       struct netfront_queue *queue = info->queue;
+       struct sk_buff *skb = info->skb;
+ 
+-      id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
++      id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
+       tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+       ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+       WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
+@@ -449,34 +467,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
+       gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+                                       gfn, GNTMAP_readonly);
+ 
+-      queue->tx_skbs[id].skb = skb;
++      queue->tx_skbs[id] = skb;
+       queue->grant_tx_page[id] = page;
+       queue->grant_tx_ref[id] = ref;
+ 
+-      tx->id = id;
+-      tx->gref = ref;
+-      tx->offset = offset;
+-      tx->size = len;
+-      tx->flags = 0;
++      info->tx_local.id = id;
++      info->tx_local.gref = ref;
++      info->tx_local.offset = offset;
++      info->tx_local.size = len;
++      info->tx_local.flags = 0;
++
++      *tx = info->tx_local;
++
++      /*
++       * Put the request in the pending queue, it will be set to be pending
++       * when the producer index is about to be raised.
++       */
++      add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
+ 
+       info->tx = tx;
+-      info->size += tx->size;
++      info->size += info->tx_local.size;
+ }
+ 
+ static struct xen_netif_tx_request *xennet_make_first_txreq(
+-      struct netfront_queue *queue, struct sk_buff *skb,
+-      struct page *page, unsigned int offset, unsigned int len)
++      struct xennet_gnttab_make_txreq *info,
++      unsigned int offset, unsigned int len)
+ {
+-      struct xennet_gnttab_make_txreq info = {
+-              .queue = queue,
+-              .skb = skb,
+-              .page = page,
+-              .size = 0,
+-      };
++      info->size = 0;
+ 
+-      gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
++      gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
+ 
+-      return info.tx;
++      return info->tx;
+ }
+ 
+ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
+@@ -489,35 +510,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
+       xennet_tx_setup_grant(gfn, offset, len, data);
+ }
+ 
+-static struct xen_netif_tx_request *xennet_make_txreqs(
+-      struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+-      struct sk_buff *skb, struct page *page,
++static void xennet_make_txreqs(
++      struct xennet_gnttab_make_txreq *info,
++      struct page *page,
+       unsigned int offset, unsigned int len)
+ {
+-      struct xennet_gnttab_make_txreq info = {
+-              .queue = queue,
+-              .skb = skb,
+-              .tx = tx,
+-      };
+-
+       /* Skip unused frames from start of page */
+       page += offset >> PAGE_SHIFT;
+       offset &= ~PAGE_MASK;
+ 
+       while (len) {
+-              info.page = page;
+-              info.size = 0;
++              info->page = page;
++              info->size = 0;
+ 
+               gnttab_foreach_grant_in_range(page, offset, len,
+                                             xennet_make_one_txreq,
+-                                            &info);
++                                            info);
+ 
+               page++;
+               offset = 0;
+-              len -= info.size;
++              len -= info->size;
+       }
+-
+-      return info.tx;
+ }
+ 
+ /*
+@@ -565,13 +578,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
+       return queue_idx;
+ }
+ 
++static void xennet_mark_tx_pending(struct netfront_queue *queue)
++{
++      unsigned int i;
++
++      while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
++              TX_LINK_NONE)
++              queue->tx_link[i] = TX_PENDING;
++}
++
+ #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
+ 
+ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct netfront_info *np = netdev_priv(dev);
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+-      struct xen_netif_tx_request *tx, *first_tx;
++      struct xen_netif_tx_request *first_tx;
+       unsigned int i;
+       int notify;
+       int slots;
+@@ -580,6 +602,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+       unsigned int len;
+       unsigned long flags;
+       struct netfront_queue *queue = NULL;
++      struct xennet_gnttab_make_txreq info = { };
+       unsigned int num_queues = dev->real_num_tx_queues;
+       u16 queue_index;
+       struct sk_buff *nskb;
+@@ -587,6 +610,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+       /* Drop the packet if no queues are set up */
+       if (num_queues < 1)
+               goto drop;
++      if (unlikely(np->broken))
++              goto drop;
+       /* Determine which queue to transmit this SKB on */
+       queue_index = skb_get_queue_mapping(skb);
+       queue = &np->queues[queue_index];
+@@ -637,21 +662,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+       }
+ 
+       /* First request for the linear area. */
+-      first_tx = tx = xennet_make_first_txreq(queue, skb,
+-                                              page, offset, len);
+-      offset += tx->size;
++      info.queue = queue;
++      info.skb = skb;
++      info.page = page;
++      first_tx = xennet_make_first_txreq(&info, offset, len);
++      offset += info.tx_local.size;
+       if (offset == PAGE_SIZE) {
+               page++;
+               offset = 0;
+       }
+-      len -= tx->size;
++      len -= info.tx_local.size;
+ 
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               /* local packet? */
+-              tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
++              first_tx->flags |= XEN_NETTXF_csum_blank |
++                                 XEN_NETTXF_data_validated;
+       else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+               /* remote but checksummed. */
+-              tx->flags |= XEN_NETTXF_data_validated;
++              first_tx->flags |= XEN_NETTXF_data_validated;
+ 
+       /* Optional extra info after the first request. */
+       if (skb_shinfo(skb)->gso_size) {
+@@ -660,7 +688,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+               gso = (struct xen_netif_extra_info *)
+                       RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+ 
+-              tx->flags |= XEN_NETTXF_extra_info;
++              first_tx->flags |= XEN_NETTXF_extra_info;
+ 
+               gso->u.gso.size = skb_shinfo(skb)->gso_size;
+               gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
+@@ -674,19 +702,21 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
+       }
+ 
+       /* Requests for the rest of the linear area. */
+-      tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
++      xennet_make_txreqs(&info, page, offset, len);
+ 
+       /* Requests for all the frags. */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+-              tx = xennet_make_txreqs(queue, tx, skb,
+-                                      skb_frag_page(frag), frag->page_offset,
++              xennet_make_txreqs(&info, skb_frag_page(frag),
++                                      frag->page_offset,
+                                       skb_frag_size(frag));
+       }
+ 
+       /* First request has the packet length. */
+       first_tx->size = skb->len;
+ 
++      xennet_mark_tx_pending(queue);
++
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
+       if (notify)
+               notify_remote_via_irq(queue->tx_irq);
+@@ -744,7 +774,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
+                            RING_IDX rp)
+ 
+ {
+-      struct xen_netif_extra_info *extra;
++      struct xen_netif_extra_info extra;
+       struct device *dev = &queue->info->netdev->dev;
+       RING_IDX cons = queue->rx.rsp_cons;
+       int err = 0;
+@@ -760,24 +790,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
+                       break;
+               }
+ 
+-              extra = (struct xen_netif_extra_info *)
+-                      RING_GET_RESPONSE(&queue->rx, ++cons);
++              RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
+ 
+-              if (unlikely(!extra->type ||
+-                           extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++              if (unlikely(!extra.type ||
++                           extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+                       if (net_ratelimit())
+                               dev_warn(dev, "Invalid extra type: %d\n",
+-                                      extra->type);
++                                       extra.type);
+                       err = -EINVAL;
+               } else {
+-                      memcpy(&extras[extra->type - 1], extra,
+-                             sizeof(*extra));
++                      extras[extra.type - 1] = extra;
+               }
+ 
+               skb = xennet_get_rx_skb(queue, cons);
+               ref = xennet_get_rx_ref(queue, cons);
+               xennet_move_rx_slot(queue, skb, ref);
+-      } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
++      } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
+ 
+       queue->rx.rsp_cons = cons;
+       return err;
+@@ -787,7 +815,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
+                               struct netfront_rx_info *rinfo, RING_IDX rp,
+                               struct sk_buff_head *list)
+ {
+-      struct xen_netif_rx_response *rx = &rinfo->rx;
++      struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
+       struct xen_netif_extra_info *extras = rinfo->extras;
+       struct device *dev = &queue->info->netdev->dev;
+       RING_IDX cons = queue->rx.rsp_cons;
+@@ -845,7 +873,8 @@ next:
+                       break;
+               }
+ 
+-              rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
++              RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
++              rx = &rx_local;
+               skb = xennet_get_rx_skb(queue, cons + slots);
+               ref = xennet_get_rx_ref(queue, cons + slots);
+               slots++;
+@@ -900,10 +929,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
+       struct sk_buff *nskb;
+ 
+       while ((nskb = __skb_dequeue(list))) {
+-              struct xen_netif_rx_response *rx =
+-                      RING_GET_RESPONSE(&queue->rx, ++cons);
++              struct xen_netif_rx_response rx;
+               skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+ 
++              RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
++
+               if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
+                       unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
+ 
+@@ -918,7 +948,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
+ 
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               skb_frag_page(nfrag),
+-                              rx->offset, rx->status, PAGE_SIZE);
++                              rx.offset, rx.status, PAGE_SIZE);
+ 
+               skb_shinfo(nskb)->nr_frags = 0;
+               kfree_skb(nskb);
+@@ -1011,12 +1041,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
+       skb_queue_head_init(&tmpq);
+ 
+       rp = queue->rx.sring->rsp_prod;
++      if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
++              dev_alert(&dev->dev, "Illegal number of responses %u\n",
++                        rp - queue->rx.rsp_cons);
++              queue->info->broken = true;
++              spin_unlock(&queue->rx_lock);
++              return 0;
++      }
+       rmb(); /* Ensure we see queued responses up to 'rp'. */
+ 
+       i = queue->rx.rsp_cons;
+       work_done = 0;
+       while ((i != rp) && (work_done < budget)) {
+-              memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
++              RING_COPY_RESPONSE(&queue->rx, i, rx);
+               memset(extras, 0, sizeof(rinfo.extras));
+ 
+               err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
+@@ -1138,17 +1175,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
+ 
+       for (i = 0; i < NET_TX_RING_SIZE; i++) {
+               /* Skip over entries which are actually freelist references */
+-              if (skb_entry_is_link(&queue->tx_skbs[i]))
++              if (!queue->tx_skbs[i])
+                       continue;
+ 
+-              skb = queue->tx_skbs[i].skb;
++              skb = queue->tx_skbs[i];
++              queue->tx_skbs[i] = NULL;
+               get_page(queue->grant_tx_page[i]);
+               gnttab_end_foreign_access(queue->grant_tx_ref[i],
+                                         GNTMAP_readonly,
+                                         (unsigned long)page_address(queue->grant_tx_page[i]));
+               queue->grant_tx_page[i] = NULL;
+               queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+-              add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
++              add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
+               dev_kfree_skb_irq(skb);
+       }
+ }
+@@ -1228,6 +1266,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+       struct netfront_queue *queue = dev_id;
+       unsigned long flags;
+ 
++      if (queue->info->broken)
++              return IRQ_HANDLED;
++
+       spin_lock_irqsave(&queue->tx_lock, flags);
+       xennet_tx_buf_gc(queue);
+       spin_unlock_irqrestore(&queue->tx_lock, flags);
+@@ -1240,6 +1281,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+       struct netfront_queue *queue = dev_id;
+       struct net_device *dev = queue->info->netdev;
+ 
++      if (queue->info->broken)
++              return IRQ_HANDLED;
++
+       if (likely(netif_carrier_ok(dev) &&
+                  RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+               napi_schedule(&queue->napi);
+@@ -1261,6 +1305,10 @@ static void xennet_poll_controller(struct net_device *dev)
+       struct netfront_info *info = netdev_priv(dev);
+       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int i;
++
++      if (info->broken)
++              return;
++
+       for (i = 0; i < num_queues; ++i)
+               xennet_interrupt(0, &info->queues[i]);
+ }
+@@ -1630,13 +1678,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
+       snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
+                devid, queue->id);
+ 
+-      /* Initialise tx_skbs as a free chain containing every entry. */
++      /* Initialise tx_skb_freelist as a free chain containing every entry. */
+       queue->tx_skb_freelist = 0;
++      queue->tx_pend_queue = TX_LINK_NONE;
+       for (i = 0; i < NET_TX_RING_SIZE; i++) {
+-              skb_entry_set_link(&queue->tx_skbs[i], i+1);
++              queue->tx_link[i] = i + 1;
+               queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+               queue->grant_tx_page[i] = NULL;
+       }
++      queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
+ 
+       /* Clear out rx_skbs */
+       for (i = 0; i < NET_RX_RING_SIZE; i++) {
+@@ -1841,6 +1891,9 @@ static int talk_to_netback(struct xenbus_device *dev,
+       if (info->queues)
+               xennet_destroy_queues(info);
+ 
++      /* For the case of a reconnect reset the "broken" indicator. */
++      info->broken = false;
++
+       err = xennet_create_queues(info, &num_queues);
+       if (err < 0) {
+               xenbus_dev_fatal(dev, err, "creating queues");
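[Note: the xen-netfront hunks above share one hardening pattern: each response is snapshotted out of the shared ring with RING_COPY_RESPONSE() instead of being dereferenced in place via RING_GET_RESPONSE(), the producer index is bounds-checked before the ring is walked, and a per-device "broken" flag makes the interrupt, poll and poll_controller paths bail out once the backend has produced garbage. A minimal stand-alone C sketch of the copy-before-use idea, with simplified types in place of the real Xen ring macros:

    #include <stdint.h>
    #include <string.h>

    /* Simplified stand-in for one rx response slot. */
    struct rx_rsp {
            uint16_t id;
            uint16_t offset;
            int16_t  status;
    };

    /* The ring page is shared with a possibly hostile backend, so an
     * entry may change between two reads of the same slot. */
    struct shared_ring {
            volatile struct rx_rsp ring[256];
    };

    /* Snapshot the slot exactly once into private memory; validation
     * and use then operate on a value the backend can no longer
     * change (no time-of-check/time-of-use window). */
    static void ring_copy_response(struct shared_ring *s, uint32_t idx,
                                   struct rx_rsp *dst)
    {
            memcpy(dst, (const void *)(uintptr_t)&s->ring[idx & 255],
                   sizeof(*dst));
    }
]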
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 98fb3c1f45e4d..e6d60fa2217da 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -9,6 +9,7 @@
+  */
+ 
+ #include <linux/delay.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/irqdomain.h>
+@@ -17,6 +18,7 @@
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+ #include <linux/of_address.h>
++#include <linux/of_gpio.h>
+ #include <linux/of_pci.h>
+ 
+ #include "../pci.h"
+@@ -26,16 +28,7 @@
+ #define     PCIE_CORE_CMD_IO_ACCESS_EN                                BIT(0)
+ #define     PCIE_CORE_CMD_MEM_ACCESS_EN                               BIT(1)
+ #define     PCIE_CORE_CMD_MEM_IO_REQ_EN                               BIT(2)
+-#define PCIE_CORE_DEV_CTRL_STATS_REG                          0xc8
+-#define     PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE      (0 << 4)
+-#define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT     5
+-#define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE            (0 << 11)
+-#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT    12
+-#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ            0x2
+-#define PCIE_CORE_LINK_CTRL_STAT_REG                          0xd0
+-#define     PCIE_CORE_LINK_L0S_ENTRY                          BIT(0)
+-#define     PCIE_CORE_LINK_TRAINING                           BIT(5)
+-#define     PCIE_CORE_LINK_WIDTH_SHIFT                                20
++#define PCIE_CORE_PCIEXP_CAP                                  0xc0
+ #define PCIE_CORE_ERR_CAPCTL_REG                              0x118
+ #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX                  BIT(5)
+ #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN                       BIT(6)
+@@ -113,14 +106,95 @@
+ #define PCIE_MSI_PAYLOAD_REG                  (CONTROL_BASE_ADDR + 0x9C)
+ #define     PCIE_MSI_DATA_MASK                        GENMASK(15, 0)
+ 
++/* PCIe window configuration */
++#define OB_WIN_BASE_ADDR                      0x4c00
++#define OB_WIN_BLOCK_SIZE                     0x20
++#define OB_WIN_COUNT                          8
++#define OB_WIN_REG_ADDR(win, offset)          (OB_WIN_BASE_ADDR + \
++                                                OB_WIN_BLOCK_SIZE * (win) + \
++                                                (offset))
++#define OB_WIN_MATCH_LS(win)                  OB_WIN_REG_ADDR(win, 0x00)
++#define     OB_WIN_ENABLE                     BIT(0)
++#define OB_WIN_MATCH_MS(win)                  OB_WIN_REG_ADDR(win, 0x04)
++#define OB_WIN_REMAP_LS(win)                  OB_WIN_REG_ADDR(win, 0x08)
++#define OB_WIN_REMAP_MS(win)                  OB_WIN_REG_ADDR(win, 0x0c)
++#define OB_WIN_MASK_LS(win)                   OB_WIN_REG_ADDR(win, 0x10)
++#define OB_WIN_MASK_MS(win)                   OB_WIN_REG_ADDR(win, 0x14)
++#define OB_WIN_ACTIONS(win)                   OB_WIN_REG_ADDR(win, 0x18)
++#define OB_WIN_DEFAULT_ACTIONS                        (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
++#define     OB_WIN_FUNC_NUM_MASK              GENMASK(31, 24)
++#define     OB_WIN_FUNC_NUM_SHIFT             24
++#define     OB_WIN_FUNC_NUM_ENABLE            BIT(23)
++#define     OB_WIN_BUS_NUM_BITS_MASK          GENMASK(22, 20)
++#define     OB_WIN_BUS_NUM_BITS_SHIFT         20
++#define     OB_WIN_MSG_CODE_ENABLE            BIT(22)
++#define     OB_WIN_MSG_CODE_MASK              GENMASK(21, 14)
++#define     OB_WIN_MSG_CODE_SHIFT             14
++#define     OB_WIN_MSG_PAYLOAD_LEN            BIT(12)
++#define     OB_WIN_ATTR_ENABLE                        BIT(11)
++#define     OB_WIN_ATTR_TC_MASK                       GENMASK(10, 8)
++#define     OB_WIN_ATTR_TC_SHIFT              8
++#define     OB_WIN_ATTR_RELAXED                       BIT(7)
++#define     OB_WIN_ATTR_NOSNOOP                       BIT(6)
++#define     OB_WIN_ATTR_POISON                        BIT(5)
++#define     OB_WIN_ATTR_IDO                   BIT(4)
++#define     OB_WIN_TYPE_MASK                  GENMASK(3, 0)
++#define     OB_WIN_TYPE_SHIFT                 0
++#define     OB_WIN_TYPE_MEM                   0x0
++#define     OB_WIN_TYPE_IO                    0x4
++#define     OB_WIN_TYPE_CONFIG_TYPE0          0x8
++#define     OB_WIN_TYPE_CONFIG_TYPE1          0x9
++#define     OB_WIN_TYPE_MSG                   0xc
++
+ /* LMI registers base address and register offsets */
+ #define LMI_BASE_ADDR                         0x6000
+ #define CFG_REG                                       (LMI_BASE_ADDR + 0x0)
+ #define     LTSSM_SHIFT                               24
+ #define     LTSSM_MASK                                0x3f
+-#define     LTSSM_L0                          0x10
+ #define     RC_BAR_CONFIG                     0x300
+ 
++/* LTSSM values in CFG_REG */
++enum {
++      LTSSM_DETECT_QUIET                      = 0x0,
++      LTSSM_DETECT_ACTIVE                     = 0x1,
++      LTSSM_POLLING_ACTIVE                    = 0x2,
++      LTSSM_POLLING_COMPLIANCE                = 0x3,
++      LTSSM_POLLING_CONFIGURATION             = 0x4,
++      LTSSM_CONFIG_LINKWIDTH_START            = 0x5,
++      LTSSM_CONFIG_LINKWIDTH_ACCEPT           = 0x6,
++      LTSSM_CONFIG_LANENUM_ACCEPT             = 0x7,
++      LTSSM_CONFIG_LANENUM_WAIT               = 0x8,
++      LTSSM_CONFIG_COMPLETE                   = 0x9,
++      LTSSM_CONFIG_IDLE                       = 0xa,
++      LTSSM_RECOVERY_RCVR_LOCK                = 0xb,
++      LTSSM_RECOVERY_SPEED                    = 0xc,
++      LTSSM_RECOVERY_RCVR_CFG                 = 0xd,
++      LTSSM_RECOVERY_IDLE                     = 0xe,
++      LTSSM_L0                                = 0x10,
++      LTSSM_RX_L0S_ENTRY                      = 0x11,
++      LTSSM_RX_L0S_IDLE                       = 0x12,
++      LTSSM_RX_L0S_FTS                        = 0x13,
++      LTSSM_TX_L0S_ENTRY                      = 0x14,
++      LTSSM_TX_L0S_IDLE                       = 0x15,
++      LTSSM_TX_L0S_FTS                        = 0x16,
++      LTSSM_L1_ENTRY                          = 0x17,
++      LTSSM_L1_IDLE                           = 0x18,
++      LTSSM_L2_IDLE                           = 0x19,
++      LTSSM_L2_TRANSMIT_WAKE                  = 0x1a,
++      LTSSM_DISABLED                          = 0x20,
++      LTSSM_LOOPBACK_ENTRY_MASTER             = 0x21,
++      LTSSM_LOOPBACK_ACTIVE_MASTER            = 0x22,
++      LTSSM_LOOPBACK_EXIT_MASTER              = 0x23,
++      LTSSM_LOOPBACK_ENTRY_SLAVE              = 0x24,
++      LTSSM_LOOPBACK_ACTIVE_SLAVE             = 0x25,
++      LTSSM_LOOPBACK_EXIT_SLAVE               = 0x26,
++      LTSSM_HOT_RESET                         = 0x27,
++      LTSSM_RECOVERY_EQUALIZATION_PHASE0      = 0x28,
++      LTSSM_RECOVERY_EQUALIZATION_PHASE1      = 0x29,
++      LTSSM_RECOVERY_EQUALIZATION_PHASE2      = 0x2a,
++      LTSSM_RECOVERY_EQUALIZATION_PHASE3      = 0x2b,
++};
++
+ /* PCIe core controller registers */
+ #define CTRL_CORE_BASE_ADDR                   0x18000
+ #define CTRL_CONFIG_REG                               (CTRL_CORE_BASE_ADDR + 0x0)
+@@ -181,6 +255,13 @@ struct advk_pcie {
+       struct platform_device *pdev;
+       void __iomem *base;
+       struct list_head resources;
++      struct {
++              phys_addr_t match;
++              phys_addr_t remap;
++              phys_addr_t mask;
++              u32 actions;
++      } wins[OB_WIN_COUNT];
++      u8 wins_count;
+       struct irq_domain *irq_domain;
+       struct irq_chip irq_chip;
+       raw_spinlock_t irq_lock;
+@@ -193,6 +274,8 @@ struct advk_pcie {
+       struct mutex msi_used_lock;
+       u16 msi_msg;
+       int root_bus_nr;
++      int link_gen;
++      struct gpio_desc *reset_gpio;
+ };
+ 
+ static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
+@@ -205,37 +288,161 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
+       return readl(pcie->base + reg);
+ }
+ 
+-static int advk_pcie_link_up(struct advk_pcie *pcie)
++static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
+ {
+-      u32 val, ltssm_state;
++      u32 val;
++      u8 ltssm_state;
+ 
+       val = advk_readl(pcie, CFG_REG);
+       ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+-      return ltssm_state >= LTSSM_L0;
++      return ltssm_state;
++}
++
++static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
++{
++      /* check if LTSSM is in normal operation - some L* state */
++      u8 ltssm_state = advk_pcie_ltssm_state(pcie);
++      return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
++}
++
++static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
++{
++      /*
++        * According to PCIe Base specification 3.0, Table 4-14: Link
++        * Status Mapped to the LTSSM is Link Training mapped to LTSSM
++        * Configuration and Recovery states.
++        */
++      u8 ltssm_state = advk_pcie_ltssm_state(pcie);
++      return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
++                ltssm_state < LTSSM_L0) ||
++              (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
++                ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
+ }
+ 
+ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+ {
+-      struct device *dev = &pcie->pdev->dev;
+       int retries;
+ 
+       /* check if the link is up or not */
+       for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+-              if (advk_pcie_link_up(pcie)) {
+-                      dev_info(dev, "link up\n");
++              if (advk_pcie_link_up(pcie))
+                       return 0;
+-              }
+ 
+               usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+       }
+ 
+-      dev_err(dev, "link never came up\n");
+       return -ETIMEDOUT;
+ }
+ 
++static void advk_pcie_issue_perst(struct advk_pcie *pcie)
++{
++      if (!pcie->reset_gpio)
++              return;
++
++      /* 10ms delay is needed for some cards */
++      dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
++      gpiod_set_value_cansleep(pcie->reset_gpio, 1);
++      usleep_range(10000, 11000);
++      gpiod_set_value_cansleep(pcie->reset_gpio, 0);
++}
++
++static void advk_pcie_train_link(struct advk_pcie *pcie)
++{
++      struct device *dev = &pcie->pdev->dev;
++      u32 reg;
++      int ret;
++
++      /*
++       * Setup PCIe rev / gen compliance based on device tree property
++       * 'max-link-speed' which also forces maximal link speed.
++       */
++      reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
++      reg &= ~PCIE_GEN_SEL_MSK;
++      if (pcie->link_gen == 3)
++              reg |= SPEED_GEN_3;
++      else if (pcie->link_gen == 2)
++              reg |= SPEED_GEN_2;
++      else
++              reg |= SPEED_GEN_1;
++      advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
++
++      /*
++       * Set maximal link speed value also into PCIe Link Control 2 register.
++       * Armada 3700 Functional Specification says that default value is based
++       * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
++       */
++      reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
++      reg &= ~PCI_EXP_LNKCTL2_TLS;
++      if (pcie->link_gen == 3)
++              reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
++      else if (pcie->link_gen == 2)
++              reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
++      else
++              reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
++      advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
++
++      /* Enable link training after selecting PCIe generation */
++      reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
++      reg |= LINK_TRAINING_EN;
++      advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
++
++      /*
++       * Reset PCIe card via PERST# signal. Some cards are not detected
++       * during link training when they are in some non-initial state.
++       */
++      advk_pcie_issue_perst(pcie);
++
++      /*
++       * PERST# signal could have been asserted by pinctrl subsystem before
++       * probe() callback has been called or issued explicitly by reset gpio
++       * function advk_pcie_issue_perst(), making the endpoint going into
++       * fundamental reset. As required by PCI Express spec (PCI Express
++       * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
++       * Conventional Reset) a delay for at least 100ms after such a reset
++       * before sending a Configuration Request to the device is needed.
++       * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
++       * waits for link at least 900ms.
++       */
++      ret = advk_pcie_wait_for_link(pcie);
++      if (ret < 0)
++              dev_err(dev, "link never came up\n");
++      else
++              dev_info(dev, "link up\n");
++}
++
++/*
++ * Set PCIe address window register which could be used for memory
++ * mapping.
++ */
++static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
++                               phys_addr_t match, phys_addr_t remap,
++                               phys_addr_t mask, u32 actions)
++{
++      advk_writel(pcie, OB_WIN_ENABLE |
++                        lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
++      advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
++      advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
++      advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
++      advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
++      advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
++      advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
++}
++
++static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
++{
++      advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
++      advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
++      advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
++      advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
++      advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
++      advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
++      advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
++}
++
+ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+ {
+       u32 reg;
++      int i;
+ 
+       /* Set to Direct mode */
+       reg = advk_readl(pcie, CTRL_CONFIG_REG);
+@@ -255,36 +462,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+               PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
+       advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
+ 
+-      /* Set PCIe Device Control and Status 1 PF0 register */
+-      reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
+-              (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
+-              PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
+-              (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+-               PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
+-      advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
++      /* Set PCIe Device Control register */
++      reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
++      reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
++      reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
++      reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
++      reg &= ~PCI_EXP_DEVCTL_READRQ;
++      reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
++      reg |= PCI_EXP_DEVCTL_READRQ_512B;
++      advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+ 
+       /* Program PCIe Control 2 to disable strict ordering */
+       reg = PCIE_CORE_CTRL2_RESERVED |
+               PCIE_CORE_CTRL2_TD_ENABLE;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+ 
+-      /* Set GEN2 */
+-      reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+-      reg &= ~PCIE_GEN_SEL_MSK;
+-      reg |= SPEED_GEN_2;
+-      advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+-
+       /* Set lane X1 */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+       reg &= ~LANE_CNT_MSK;
+       reg |= LANE_COUNT_1;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+ 
+-      /* Enable link training */
+-      reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+-      reg |= LINK_TRAINING_EN;
+-      advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+-
+       /* Enable MSI */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+       reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
+@@ -309,21 +507,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+       reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
+       advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+ 
++      /*
++       * Enable AXI address window location generation:
++       * When it is enabled, the default outbound window
++       * configurations (Default User Field: 0xD0074CFC)
++       * are used to transparent address translation for
++       * the outbound transactions. Thus, PCIe address
++       * windows are not required for transparent memory
++       * access when default outbound window configuration
++       * is set for memory access.
++       */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+       reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+ 
+-      /* Bypass the address window mapping for PIO */
++      /*
++       * Set memory access in Default User Field so it
++       * is not required to configure PCIe address for
++       * transparent memory access.
++       */
++      advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
++
++      /*
++       * Bypass the address window mapping for PIO:
++       * Since PIO access already contains all required
++       * info over AXI interface by PIO registers, the
++       * address window is not required.
++       */
+       reg = advk_readl(pcie, PIO_CTRL);
+       reg |= PIO_CTRL_ADDR_WIN_DISABLE;
+       advk_writel(pcie, reg, PIO_CTRL);
+ 
+-      /* Start link training */
+-      reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
+-      reg |= PCIE_CORE_LINK_TRAINING;
+-      advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
++      /*
++       * Configure PCIe address windows for non-memory or
++       * non-transparent access as by default PCIe uses
++       * transparent memory access.
++       */
++      for (i = 0; i < pcie->wins_count; i++)
++              advk_pcie_set_ob_win(pcie, i,
++                                   pcie->wins[i].match, pcie->wins[i].remap,
++                                   pcie->wins[i].mask, pcie->wins[i].actions);
+ 
+-      advk_pcie_wait_for_link(pcie);
++      /* Disable remaining PCIe outbound windows */
++      for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
++              advk_pcie_disable_ob_win(pcie, i);
++
++      advk_pcie_train_link(pcie);
+ 
+       reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+       reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
+@@ -435,6 +664,13 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+       if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+               return false;
+ 
++      /*
++       * If the link goes down after we check for link-up, nothing bad
++       * happens but the config access times out.
++       */
++      if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
++              return false;
++
+       return true;
+ }
+ 
+@@ -506,8 +742,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+       advk_writel(pcie, 1, PIO_START);
+ 
+       ret = advk_pcie_wait_pio(pcie);
+-      if (ret < 0)
++      if (ret < 0) {
++              *val = 0xffffffff;
+               return PCIBIOS_SET_FAILED;
++      }
+ 
+       /* Check PIO status and get the read result */
+       ret = advk_pcie_check_pio_status(pcie, val);
+@@ -754,6 +992,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+       struct device_node *node = dev->of_node;
+       struct device_node *pcie_intc_node;
+       struct irq_chip *irq_chip;
++      int ret = 0;
+ 
+       raw_spin_lock_init(&pcie->irq_lock);
+ 
+@@ -768,8 +1007,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+       irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
+                                       dev_name(dev));
+       if (!irq_chip->name) {
+-              of_node_put(pcie_intc_node);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto out_put_node;
+       }
+ 
+       irq_chip->irq_mask = advk_pcie_irq_mask;
+@@ -781,11 +1020,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+                                     &advk_pcie_irq_domain_ops, pcie);
+       if (!pcie->irq_domain) {
+               dev_err(dev, "Failed to get a INTx IRQ domain\n");
+-              of_node_put(pcie_intc_node);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto out_put_node;
+       }
+ 
+-      return 0;
++out_put_node:
++      of_node_put(pcie_intc_node);
++      return ret;
+ }
+ 
+ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
+@@ -925,6 +1166,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
+       struct advk_pcie *pcie;
+       struct resource *res;
+       struct pci_host_bridge *bridge;
++      struct resource_entry *entry;
+       int ret, irq;
+ 
+       bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+@@ -954,6 +1196,102 @@ static int advk_pcie_probe(struct platform_device *pdev)
+               return ret;
+       }
+ 
++      resource_list_for_each_entry(entry, &pcie->resources) {
++              resource_size_t start = entry->res->start;
++              resource_size_t size = resource_size(entry->res);
++              unsigned long type = resource_type(entry->res);
++              u64 win_size;
++
++              /*
++               * Aardvark hardware allows to configure also PCIe window
++               * for config type 0 and type 1 mapping, but driver uses
++               * only PIO for issuing configuration transfers which does
++               * not use PCIe window configuration.
++               */
++              if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
++                  type != IORESOURCE_IO)
++                      continue;
++
++              /*
++               * Skip transparent memory resources. Default outbound access
++               * configuration is set to transparent memory access so it
++               * does not need window configuration.
++               */
++              if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
++                  entry->offset == 0)
++                      continue;
++
++              /*
++               * The n-th PCIe window is configured by tuple (match, remap, mask)
++               * and an access to address A uses this window if A matches the
++               * match with given mask.
++               * So every PCIe window size must be a power of two and every start
++               * address must be aligned to window size. Minimal size is 64 KiB
++               * because lower 16 bits of mask must be zero. Remapped address
++               * may have set only bits from the mask.
++               */
++              while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
++                      /* Calculate the largest aligned window size */
++                      win_size = (1ULL << (fls64(size)-1)) |
++                                 (start ? (1ULL << __ffs64(start)) : 0);
++                      win_size = 1ULL << __ffs64(win_size);
++                      if (win_size < 0x10000)
++                              break;
++
++                      dev_dbg(dev,
++                              "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
++                              pcie->wins_count, (unsigned long long)start,
++                              (unsigned long long)start + win_size, type);
++
++                      if (type == IORESOURCE_IO) {
++                              pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
++                              pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
++                      } else {
++                              pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
++                              pcie->wins[pcie->wins_count].match = start;
++                      }
++                      pcie->wins[pcie->wins_count].remap = start - entry->offset;
++                      pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
++
++                      if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
++                              break;
++
++                      start += win_size;
++                      size -= win_size;
++                      pcie->wins_count++;
++              }
++
++              if (size > 0) {
++                      dev_err(&pcie->pdev->dev,
++                              "Invalid PCIe region [0x%llx-0x%llx]\n",
++                              (unsigned long long)entry->res->start,
++                              (unsigned long long)entry->res->end + 1);
++                      return -EINVAL;
++              }
++      }
++
++      pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
++                                                     "reset-gpios", 0,
++                                                     GPIOD_OUT_LOW,
++                                                     "pcie1-reset");
++      ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
++      if (ret) {
++              if (ret == -ENOENT) {
++                      pcie->reset_gpio = NULL;
++              } else {
++                      if (ret != -EPROBE_DEFER)
++                              dev_err(dev, "Failed to get reset-gpio: %i\n",
++                                      ret);
++                      return ret;
++              }
++      }
++
++      ret = of_pci_get_max_link_speed(dev->of_node);
++      if (ret <= 0 || ret > 3)
++              pcie->link_gen = 3;
++      else
++              pcie->link_gen = ret;
++
+       advk_pcie_setup_hw(pcie);
+ 
+       ret = advk_pcie_init_irq_domain(pcie);
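[Note: the probe-time loop added above carves each bridge window into hardware outbound windows that must be power-of-two sized and size-aligned, because the hardware matches addresses with a (match, mask) pair. The same arithmetic as a small compilable C program; GCC builtins stand in for the kernel's fls64()/__ffs64(), and the example region is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Largest power-of-two chunk that both fits in 'size' and is
     * naturally aligned at 'start' - the kernel loop computes this
     * with fls64() and __ffs64(). */
    static uint64_t chunk(uint64_t start, uint64_t size)
    {
            uint64_t win = 1ULL << (63 - __builtin_clzll(size));

            if (start)
                    win |= 1ULL << __builtin_ctzll(start);
            return 1ULL << __builtin_ctzll(win); /* min of the two */
    }

    int main(void)
    {
            uint64_t start = 0xe8000000, size = 0x3000000;

            while (size > 0) {
                    uint64_t w = chunk(start, size);

                    if (w < 0x10000) /* low 16 mask bits must stay zero */
                            break;
                    printf("window [%#llx-%#llx)\n",
                           (unsigned long long)start,
                           (unsigned long long)(start + w));
                    start += w;
                    size -= w;
            }
            return 0;
    }

For this made-up region the program prints two windows, [0xe8000000-0xea000000) and [0xea000000-0xeb000000), exactly the split the driver would program.]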
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+index d76ac6b4b40df..e69b84d9538a0 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -166,12 +166,16 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
+       PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
+       PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
+       PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
+-      PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
+-      PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
+-      PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
+-      PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
+-      PIN_GRP_GPIO("pmic1", 17, 1, BIT(7), "pmic"),
+-      PIN_GRP_GPIO("pmic0", 16, 1, BIT(8), "pmic"),
++      PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
++                     "pwm", "led"),
++      PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
++                     "pwm", "led"),
++      PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
++                     "pwm", "led"),
++      PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
++                     "pwm", "led"),
++      PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
++      PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
+       PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
+       PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"),
+       PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"),
+@@ -183,11 +187,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
+       PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
+                     BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
+                     18, 2, "gpio", "uart"),
+-      PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+-      PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+-      PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+-      PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
+-
+ };
+ 
+ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
+@@ -195,8 +194,11 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
+       PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
+       PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
+       PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
+-      PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
+-      PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
++      PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
++      PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"),
++      PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
++      PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
++      PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
+       PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
+       PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
+       PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index add699b01836f..d899f216245e5 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3364,7 +3364,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+ 
+       shost_for_each_device(sdev, ioc->shost) {
+               sas_device_priv_data = sdev->hostdata;
+-              if (!sas_device_priv_data)
++              if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+                       continue;
+               if (sas_device_priv_data->sas_target->sas_address
+                   != sas_address)
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index 7cdced0b0581e..da73998bc5f70 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -2579,13 +2579,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
+                       free_irq(dev->irq, dev);
+                       priv->irq = 0;
+               }
+-              free_rtllib(dev);
+ 
+               if (dev->mem_start != 0) {
+                       iounmap((void __iomem *)dev->mem_start);
+                       release_mem_region(pci_resource_start(pdev, 1),
+                                       pci_resource_len(pdev, 1));
+               }
++
++              free_rtllib(dev);
+       } else {
+               priv = rtllib_priv(dev);
+       }
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index dc43fa96c3de7..7874aaf30ef48 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -86,7 +86,11 @@ static int __write_console(struct xencons_info *xencons,
+       cons = intf->out_cons;
+       prod = intf->out_prod;
+       mb();                   /* update queue values before going on */
+-      BUG_ON((prod - cons) > sizeof(intf->out));
++
++      if ((prod - cons) > sizeof(intf->out)) {
++              pr_err_once("xencons: Illegal ring page indices");
++              return -EINVAL;
++      }
+ 
+       while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
+               intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
+@@ -114,7 +118,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
+        */
+       while (len) {
+               int sent = __write_console(cons, data, len);
+-              
++
++              if (sent < 0)
++                      return sent;
++
+               data += sent;
+               len -= sent;
+ 
+@@ -138,7 +145,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
+       cons = intf->in_cons;
+       prod = intf->in_prod;
+       mb();                   /* get pointers before reading ring */
+-      BUG_ON((prod - cons) > sizeof(intf->in));
++
++      if ((prod - cons) > sizeof(intf->in)) {
++              pr_err_once("xencons: Illegal ring page indices");
++              return -EINVAL;
++      }
+ 
+       while (cons != prod && recv < len)
+               buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
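[Note: both hvc_xen hunks replace a BUG_ON() with a recoverable check on ring indices read from a page the other domain can write. The test relies on modular arithmetic: prod and cons only ever grow (mod 2^32), so the unsigned difference prod - cons is the number of queued bytes even after prod wraps past zero, and a difference larger than the ring itself proves the indices were corrupted. A self-contained illustration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SIZE 1024u /* stands in for sizeof(intf->out) */

    /* Sane iff at most RING_SIZE bytes are outstanding; unsigned
     * subtraction keeps this correct across index wraparound. */
    static bool indices_sane(uint32_t prod, uint32_t cons)
    {
            return prod - cons <= RING_SIZE;
    }

    int main(void)
    {
            assert(indices_sane(100, 100));           /* empty ring */
            assert(indices_sane(0x10u, 0xffffff90u)); /* wrapped: 128 queued */
            assert(!indices_sane(2048, 0));           /* corrupt: reject, no BUG */
            return 0;
    }
]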
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index f1a11032a0a01..73ad4af487039 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4575,8 +4575,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+       if (oldspeed == USB_SPEED_LOW)
+               delay = HUB_LONG_RESET_TIME;
+ 
+-      mutex_lock(hcd->address0_mutex);
+-
+       /* Reset the device; full speed may morph to high speed */
+       /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+       retval = hub_port_reset(hub, port1, udev, delay, false);
+@@ -4891,7 +4889,6 @@ fail:
+               hub_port_disable(hub, port1, 0);
+               update_devnum(udev, devnum);    /* for disconnect processing */
+       }
+-      mutex_unlock(hcd->address0_mutex);
+       return retval;
+ }
+ 
+@@ -4981,6 +4978,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+       struct usb_port *port_dev = hub->ports[port1 - 1];
+       struct usb_device *udev = port_dev->child;
+       static int unreliable_port = -1;
++      bool retry_locked;
+ 
+       /* Disconnect any existing devices under this port */
+       if (udev) {
+@@ -5036,7 +5034,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+               unit_load = 100;
+ 
+       status = 0;
++
+       for (i = 0; i < SET_CONFIG_TRIES; i++) {
++              usb_lock_port(port_dev);
++              mutex_lock(hcd->address0_mutex);
++              retry_locked = true;
+ 
+               /* reallocate for each attempt, since references
+                * to the previous one can escape in various ways
+@@ -5045,6 +5047,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+               if (!udev) {
+                       dev_err(&port_dev->dev,
+                                       "couldn't allocate usb_device\n");
++                      mutex_unlock(hcd->address0_mutex);
++                      usb_unlock_port(port_dev);
+                       goto done;
+               }
+ 
+@@ -5066,12 +5070,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+               }
+ 
+               /* reset (non-USB 3.0 devices) and get descriptor */
+-              usb_lock_port(port_dev);
+               status = hub_port_init(hub, udev, port1, i);
+-              usb_unlock_port(port_dev);
+               if (status < 0)
+                       goto loop;
+ 
++              mutex_unlock(hcd->address0_mutex);
++              usb_unlock_port(port_dev);
++              retry_locked = false;
++
+               if (udev->quirks & USB_QUIRK_DELAY_INIT)
+                       msleep(2000);
+ 
+@@ -5164,6 +5170,10 @@ loop:
+               usb_ep0_reinit(udev);
+               release_devnum(udev);
+               hub_free_dev(udev);
++              if (retry_locked) {
++                      mutex_unlock(hcd->address0_mutex);
++                      usb_unlock_port(port_dev);
++              }
+               usb_put_dev(udev);
+               if ((status == -ENOTCONN) || (status == -ENOTSUPP))
+                       break;
+@@ -5722,6 +5732,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+       bos = udev->bos;
+       udev->bos = NULL;
+ 
++      mutex_lock(hcd->address0_mutex);
++
+       for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+ 
+               /* ep0 maxpacket size may change; let the HCD know about it.
+@@ -5731,6 +5743,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+               if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
+                       break;
+       }
++      mutex_unlock(hcd->address0_mutex);
+ 
+       if (ret < 0)
+               goto re_enumerate;
+diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
+index 68bbac64b7536..94af71e9856f2 100644
+--- a/drivers/usb/dwc2/hcd_queue.c
++++ b/drivers/usb/dwc2/hcd_queue.c
+@@ -59,7 +59,7 @@
+ #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
+ 
+ /* If we get a NAK, wait this long before retrying */
+-#define DWC2_RETRY_WAIT_DELAY 1*1E6L
++#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
+ 
+ /**
+  * dwc2_periodic_channel_available() - Checks that a channel is available for a
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index eaf118ee2a865..818097e86cb58 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = NCTRL(2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x9010),                          /* Telit SBL FN980 flashing device */
+         .driver_info = NCTRL(0) | ZLP },
++      { USB_DEVICE(TELIT_VENDOR_ID, 0x9200),                          /* Telit LE910S1 flashing device */
++        .driver_info = NCTRL(0) | ZLP },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) },
+@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),                     /* Fibocom FM101-GL (laptop MBIM) */
++        .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
+       { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 6ee320259e4f7..d61abf569dc1d 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -490,7 +490,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+                       virtio_transport_free_pkt(pkt);
+ 
+               len += sizeof(pkt->hdr);
+-              vhost_add_used(vq, head, len);
++              vhost_add_used(vq, head, 0);
+               total_len += len;
+               added = true;
+       } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
+index 652894d619677..b911a91bce6b7 100644
+--- a/drivers/xen/xenbus/xenbus_probe.c
++++ b/drivers/xen/xenbus/xenbus_probe.c
+@@ -846,7 +846,7 @@ static struct notifier_block xenbus_resume_nb = {
+ 
+ static int __init xenbus_init(void)
+ {
+-      int err = 0;
++      int err;
+       uint64_t v = 0;
+       xen_store_domain_type = XS_UNKNOWN;
+ 
+@@ -886,6 +886,29 @@ static int __init xenbus_init(void)
+               err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+               if (err)
+                       goto out_error;
++              /*
++               * Uninitialized hvm_params are zero and return no error.
++               * Although it is theoretically possible to have
++               * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
++               * not zero when valid. If zero, it means that Xenstore hasn't
++               * been properly initialized. Instead of attempting to map a
++               * wrong guest physical address return error.
++               *
++               * Also recognize all bits set as an invalid value.
++               */
++              if (!v || !~v) {
++                      err = -ENOENT;
++                      goto out_error;
++              }
++              /* Avoid truncation on 32-bit. */
++#if BITS_PER_LONG == 32
++              if (v > ULONG_MAX) {
++                      pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
++                             __func__, v);
++                      err = -EINVAL;
++                      goto out_error;
++              }
++#endif
+               xen_store_gfn = (unsigned long)v;
+               xen_store_interface =
+                       xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
+@@ -920,8 +943,10 @@ static int __init xenbus_init(void)
+        */
+       proc_create_mount_point("xen");
+ #endif
++      return 0;
+ 
+ out_error:
++      xen_store_domain_type = XS_UNKNOWN;
+       return err;
+ }
+ 
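[Note: the xenbus_init() change stops trusting HVM_PARAM_STORE_PFN blindly: an hvm_param that was never initialized reads back as zero, all-bits-set is likewise treated as invalid, and on 32-bit the 64-bit value must also fit in unsigned long before it is used as a guest frame number. The added checks, condensed into one stand-alone helper (error values mirror the patch; this is a sketch, not kernel code):

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>

    /* Returns 0 and stores the frame number, or a negative errno.
     * 0 / ~0 mean "Xenstore was never set up"; the ULONG_MAX test
     * only bites where unsigned long is 32-bit. */
    static int validate_store_pfn(uint64_t v, unsigned long *gfn)
    {
            if (v == 0 || v == UINT64_MAX)
                    return -ENOENT;
    #if ULONG_MAX < UINT64_MAX
            if (v > ULONG_MAX)
                    return -EINVAL;
    #endif
            *gfn = (unsigned long)v;
            return 0;
    }
]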
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 8b22f8705dd48..d1dc545302528 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -910,6 +910,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+       if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+               lru_cache_add_file(newpage);
+ 
++      /*
++       * Release while we have extra ref on stolen page.  Otherwise
++       * anon_pipe_buf_release() might think the page can be reused.
++       */
++      pipe_buf_release(cs->pipe, buf);
++
+       err = 0;
+       spin_lock(&cs->req->waitq.lock);
+       if (test_bit(FR_ABORTED, &cs->req->flags))
+@@ -2054,8 +2060,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
+ 
+       pipe_lock(pipe);
+ out_free:
+-      for (idx = 0; idx < nbuf; idx++)
+-              pipe_buf_release(pipe, &bufs[idx]);
++      for (idx = 0; idx < nbuf; idx++) {
++              struct pipe_buffer *buf = &bufs[idx];
++
++              if (buf->ops)
++                      pipe_buf_release(pipe, buf);
++      }
+       pipe_unlock(pipe);
+ 
+       kvfree(bufs);
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index ec9803088f6b8..eee011de3f58b 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -707,8 +707,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
+       status = decode_clone(xdr);
+       if (status)
+               goto out;
+-      status = decode_getfattr(xdr, res->dst_fattr, res->server);
+-
++      decode_getfattr(xdr, res->dst_fattr, res->server);
+ out:
+       res->rpc_status = status;
+       return status;
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index c4147e50af98a..f5dfedc015520 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -117,14 +117,19 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
+                       nr_bytes = count;
+ 
+               /* If pfn is not ram, return zeros for sparse dump files */
+-              if (pfn_is_ram(pfn) == 0)
+-                      memset(buf, 0, nr_bytes);
+-              else {
++              if (pfn_is_ram(pfn) == 0) {
++                      tmp = 0;
++                      if (!userbuf)
++                              memset(buf, 0, nr_bytes);
++                      else if (clear_user(buf, nr_bytes))
++                              tmp = -EFAULT;
++              } else {
+                       tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+                                               offset, userbuf);
+-                      if (tmp < 0)
+-                              return tmp;
+               }
++              if (tmp < 0)
++                      return tmp;
++
+               *ppos += nr_bytes;
+               count -= nr_bytes;
+               buf += nr_bytes;
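[Note: the vmcore hunk fixes read_from_oldmem() for destinations in user space: memset() cannot be applied to a user pointer, so the zero-fill for non-RAM pages has to go through clear_user(), and a fault there now propagates as -EFAULT through the same error check that already covered copy_oldmem_page(). The reshaped branch, reduced to a sketch with stub helpers (the real primitives are clear_user(), which returns the number of bytes it could not zero, and copy_oldmem_page()):

    #include <stddef.h>
    #include <string.h>

    #define EFAULT 14

    /* Stub: real clear_user() may fault on a bad user pointer and
     * return the number of bytes left un-zeroed. */
    static unsigned long stub_clear_user(char *ubuf, size_t n)
    {
            memset(ubuf, 0, n);
            return 0;
    }

    /* Stub: real copy_oldmem_page() returns bytes copied or < 0. */
    static long stub_copy_oldmem(char *buf, size_t n)
    {
            memset(buf, 0x5a, n);
            return (long)n;
    }

    /* Shape of the fixed logic: both paths funnel their result into
     * 'tmp' and share a single error check. */
    static long fill_chunk(char *buf, size_t n, int pfn_is_ram, int userbuf)
    {
            long tmp = (long)n;

            if (!pfn_is_ram) {
                    if (!userbuf)
                            memset(buf, 0, n);        /* kernel buffer */
                    else if (stub_clear_user(buf, n))
                            tmp = -EFAULT;            /* user buffer may fault */
            } else {
                    tmp = stub_copy_oldmem(buf, n);
            }
            return tmp;                               /* < 0 propagates */
    }
]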
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index b3353e21f3b3e..db72ad39853b9 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -118,6 +118,8 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+ void tlb_flush_mmu(struct mmu_gather *tlb);
+ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+                        unsigned long start, unsigned long end, bool force);
++void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++                       unsigned long size);
+ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+                                  int page_size);
+ 
+diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
+index e67564af6f934..1560ce548add5 100644
+--- a/include/net/nfc/nci_core.h
++++ b/include/net/nfc/nci_core.h
+@@ -42,6 +42,7 @@ enum nci_flag {
+       NCI_UP,
+       NCI_DATA_EXCHANGE,
+       NCI_DATA_EXCHANGE_TO,
++      NCI_UNREG,
+ };
+ 
+ /* NCI device states */
+diff --git a/include/net/nl802154.h b/include/net/nl802154.h
+index ddcee128f5d9a..145acb8f25095 100644
+--- a/include/net/nl802154.h
++++ b/include/net/nl802154.h
+@@ -19,6 +19,8 @@
+  *
+  */
+ 
++#include <linux/types.h>
++
+ #define NL802154_GENL_NAME "nl802154"
+ 
+ enum nl802154_commands {
+@@ -150,10 +152,9 @@ enum nl802154_attrs {
+ };
+ 
+ enum nl802154_iftype {
+-      /* for backwards compatibility TODO */
+-      NL802154_IFTYPE_UNSPEC = -1,
++      NL802154_IFTYPE_UNSPEC = (~(__u32)0),
+ 
+-      NL802154_IFTYPE_NODE,
++      NL802154_IFTYPE_NODE = 0,
+       NL802154_IFTYPE_MONITOR,
+       NL802154_IFTYPE_COORD,
+ 
+diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
+index 3f40501fc60b1..b39cdbc522ec7 100644
+--- a/include/xen/interface/io/ring.h
++++ b/include/xen/interface/io/ring.h
+@@ -1,21 +1,53 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+ /******************************************************************************
+  * ring.h
+  *
+  * Shared producer-consumer ring macros.
+  *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
+  * Tim Deegan and Andrew Warfield November 2004.
+  */
+ 
+ #ifndef __XEN_PUBLIC_IO_RING_H__
+ #define __XEN_PUBLIC_IO_RING_H__
+ 
++/*
++ * When #include'ing this header, you need to provide the following
++ * declaration upfront:
++ * - standard integers types (uint8_t, uint16_t, etc)
++ * They are provided by stdint.h of the standard headers.
++ *
++ * In addition, if you intend to use the FLEX macros, you also need to
++ * provide the following, before invoking the FLEX macros:
++ * - size_t
++ * - memcpy
++ * - grant_ref_t
++ * These declarations are provided by string.h of the standard headers,
++ * and grant_table.h from the Xen public headers.
++ */
++
+ #include <xen/interface/grant_table.h>
+ 
+ typedef unsigned int RING_IDX;
+ 
+ /* Round a 32-bit unsigned constant down to the nearest power of two. */
+-#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                : ((_x) & 0x1))
++#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
+ #define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
+ #define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
+ #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
+@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
+  * A ring contains as many entries as will fit, rounded down to the nearest
+  * power of two (so we can mask with (size-1) to loop around).
+  */
+-#define __CONST_RING_SIZE(_s, _sz)                            \
+-      (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /   \
+-              sizeof(((struct _s##_sring *)0)->ring[0])))
+-
++#define __CONST_RING_SIZE(_s, _sz) \
++    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
++          sizeof(((struct _s##_sring *)0)->ring[0])))
+ /*
+  * The same for passing in an actual pointer instead of a name tag.
+  */
+-#define __RING_SIZE(_s, _sz)                                          \
+-      (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
++#define __RING_SIZE(_s, _sz) \
++    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+ 
+ /*
+  * Macros to make the correct C datatypes for a new kind of ring.
+  *
+  * To make a new ring datatype, you need to have two message structures,
+- * let's say struct request, and struct response already defined.
++ * let's say request_t, and response_t already defined.
+  *
+  * In a header where you want the ring datatype declared, you then do:
+  *
+- *     DEFINE_RING_TYPES(mytag, struct request, struct response);
++ *     DEFINE_RING_TYPES(mytag, request_t, response_t);
+  *
+  * These expand out to give you a set of types, as you can see below.
+  * The most important of these are:
+  *
+- *     struct mytag_sring      - The shared ring.
+- *     struct mytag_front_ring - The 'front' half of the ring.
+- *     struct mytag_back_ring  - The 'back' half of the ring.
++ *     mytag_sring_t      - The shared ring.
++ *     mytag_front_ring_t - The 'front' half of the ring.
++ *     mytag_back_ring_t  - The 'back' half of the ring.
+  *
+  * To initialize a ring in your code you need to know the location and size
+  * of the shared memory area (PAGE_SIZE, for instance). To initialise
+  * the front half:
+  *
+- *     struct mytag_front_ring front_ring;
+- *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
+- *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
+- *                   PAGE_SIZE);
++ *     mytag_front_ring_t front_ring;
++ *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
++ *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+  *
+  * Initializing the back follows similarly (note that only the front
+  * initializes the shared ring):
+  *
+- *     struct mytag_back_ring back_ring;
+- *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
+- *                  PAGE_SIZE);
++ *     mytag_back_ring_t back_ring;
++ *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
+  */
+ 
+-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                   \
+-                                                                      \
+-/* Shared ring entry */                                                       \
+-union __name##_sring_entry {                                          \
+-    __req_t req;                                                      \
+-    __rsp_t rsp;                                                      \
+-};                                                                    \
+-                                                                      \
+-/* Shared ring page */                                                        \
+-struct __name##_sring {                                                       \
+-    RING_IDX req_prod, req_event;                                     \
+-    RING_IDX rsp_prod, rsp_event;                                     \
+-    uint8_t  pad[48];                                                 \
+-    union __name##_sring_entry ring[1]; /* variable-length */         \
+-};                                                                    \
+-                                                                      \
+-/* "Front" end's private variables */                                 \
+-struct __name##_front_ring {                                          \
+-    RING_IDX req_prod_pvt;                                            \
+-    RING_IDX rsp_cons;                                                        \
+-    unsigned int nr_ents;                                             \
+-    struct __name##_sring *sring;                                     \
+-};                                                                    \
+-                                                                      \
+-/* "Back" end's private variables */                                  \
+-struct __name##_back_ring {                                           \
+-    RING_IDX rsp_prod_pvt;                                            \
+-    RING_IDX req_cons;                                                        \
+-    unsigned int nr_ents;                                             \
+-    struct __name##_sring *sring;                                     \
+-};
+-
++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
++                                                                        \
++/* Shared ring entry */                                                 \
++union __name##_sring_entry {                                            \
++    __req_t req;                                                        \
++    __rsp_t rsp;                                                        \
++};                                                                      \
++                                                                        \
++/* Shared ring page */                                                  \
++struct __name##_sring {                                                 \
++    RING_IDX req_prod, req_event;                                       \
++    RING_IDX rsp_prod, rsp_event;                                       \
++    uint8_t __pad[48];                                                  \
++    union __name##_sring_entry ring[1]; /* variable-length */           \
++};                                                                      \
++                                                                        \
++/* "Front" end's private variables */                                   \
++struct __name##_front_ring {                                            \
++    RING_IDX req_prod_pvt;                                              \
++    RING_IDX rsp_cons;                                                  \
++    unsigned int nr_ents;                                               \
++    struct __name##_sring *sring;                                       \
++};                                                                      \
++                                                                        \
++/* "Back" end's private variables */                                    \
++struct __name##_back_ring {                                             \
++    RING_IDX rsp_prod_pvt;                                              \
++    RING_IDX req_cons;                                                  \
++    unsigned int nr_ents;                                               \
++    struct __name##_sring *sring;                                       \
++};                                                                      \
++                                                                        \
+ /*
+  * Macros for manipulating rings.
+  *
+@@ -119,105 +148,99 @@ struct __name##_back_ring {                     \
+  */
+ 
+ /* Initialising empty rings */
+-#define SHARED_RING_INIT(_s) do {                                     \
+-    (_s)->req_prod  = (_s)->rsp_prod  = 0;                            \
+-    (_s)->req_event = (_s)->rsp_event = 1;                            \
+-    memset((_s)->pad, 0, sizeof((_s)->pad));                          \
++#define SHARED_RING_INIT(_s) do {                                       \
++    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
++    (_s)->req_event = (_s)->rsp_event = 1;                              \
++    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad));                  \
+ } while(0)
+ 
+-#define FRONT_RING_INIT(_r, _s, __size) do {                          \
+-    (_r)->req_prod_pvt = 0;                                           \
+-    (_r)->rsp_cons = 0;                                               \
+-    (_r)->nr_ents = __RING_SIZE(_s, __size);                          \
+-    (_r)->sring = (_s);                                               \
++#define FRONT_RING_ATTACH(_r, _s, _i, __size) do {                      \
++    (_r)->req_prod_pvt = (_i);                                          \
++    (_r)->rsp_cons = (_i);                                              \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++    (_r)->sring = (_s);                                                 \
+ } while (0)
+ 
+-#define BACK_RING_INIT(_r, _s, __size) do {                           \
+-    (_r)->rsp_prod_pvt = 0;                                           \
+-    (_r)->req_cons = 0;                                               \
+-    (_r)->nr_ents = __RING_SIZE(_s, __size);                          \
+-    (_r)->sring = (_s);                                               \
+-} while (0)
++#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
+ 
+-/* Initialize to existing shared indexes -- for recovery */
+-#define FRONT_RING_ATTACH(_r, _s, __size) do {                        \
+-    (_r)->sring = (_s);                                               \
+-    (_r)->req_prod_pvt = (_s)->req_prod;                              \
+-    (_r)->rsp_cons = (_s)->rsp_prod;                                  \
+-    (_r)->nr_ents = __RING_SIZE(_s, __size);                          \
++#define BACK_RING_ATTACH(_r, _s, _i, __size) do {                       \
++    (_r)->rsp_prod_pvt = (_i);                                          \
++    (_r)->req_cons = (_i);                                              \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++    (_r)->sring = (_s);                                                 \
+ } while (0)
+ 
+-#define BACK_RING_ATTACH(_r, _s, __size) do {                         \
+-    (_r)->sring = (_s);                                               \
+-    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                              \
+-    (_r)->req_cons = (_s)->req_prod;                                  \
+-    (_r)->nr_ents = __RING_SIZE(_s, __size);                          \
+-} while (0)
++#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
+ 
+ /* How big is this ring? */
+-#define RING_SIZE(_r)                                                 \
++#define RING_SIZE(_r)                                                   \
+     ((_r)->nr_ents)
+ 
+ /* Number of free requests (for use on front side only). */
+-#define RING_FREE_REQUESTS(_r)                                        \
++#define RING_FREE_REQUESTS(_r)                                          \
+     (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+ 
+ /* Test if there is an empty slot available on the front ring.
+  * (This is only meaningful from the front. )
+  */
+-#define RING_FULL(_r)                                                 \
++#define RING_FULL(_r)                                                   \
+     (RING_FREE_REQUESTS(_r) == 0)
+ 
+ /* Test if there are outstanding messages to be processed on a ring. */
+-#define RING_HAS_UNCONSUMED_RESPONSES(_r)                             \
++#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
+     ((_r)->sring->rsp_prod - (_r)->rsp_cons)
+ 
+-#define RING_HAS_UNCONSUMED_REQUESTS(_r)                              \
+-    ({                                                                \
+-      unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;      \
+-      unsigned int rsp = RING_SIZE(_r) -                              \
+-                         ((_r)->req_cons - (_r)->rsp_prod_pvt);       \
+-      req < rsp ? req : rsp;                                          \
+-    })
++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({                             \
++    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;          \
++    unsigned int rsp = RING_SIZE(_r) -                                  \
++        ((_r)->req_cons - (_r)->rsp_prod_pvt);                          \
++    req < rsp ? req : rsp;                                              \
++})
+ 
+ /* Direct access to individual ring elements, by index. */
+-#define RING_GET_REQUEST(_r, _idx)                                    \
++#define RING_GET_REQUEST(_r, _idx)                                      \
+     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+ 
++#define RING_GET_RESPONSE(_r, _idx)                                     \
++    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
++
+ /*
+- * Get a local copy of a request.
++ * Get a local copy of a request/response.
+  *
+- * Use this in preference to RING_GET_REQUEST() so all processing is
++ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
+  * done on a local copy that cannot be modified by the other end.
+  *
+  * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+- * to be ineffective where _req is a struct which consists of only bitfields.
++ * to be ineffective where dest is a struct which consists of only bitfields.
+  */
+-#define RING_COPY_REQUEST(_r, _idx, _req) do {                        \
+-      /* Use volatile to force the copy into _req. */                 \
+-      *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx);   \
++#define RING_COPY_(type, r, idx, dest) do {                           \
++      /* Use volatile to force the copy into dest. */                 \
++      *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx);      \
+ } while (0)
+ 
+-#define RING_GET_RESPONSE(_r, _idx)                                   \
+-    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
++#define RING_COPY_REQUEST(r, idx, req)  RING_COPY_(REQUEST, r, idx, req)
++#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
+ 
+ /* Loop termination condition: Would the specified index overflow the ring? */
+-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                         \
++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
+     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+ 
+ /* Ill-behaved frontend determination: Can there be this many requests? */
+-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)               \
++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                           \
+     (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+ 
++/* Ill-behaved backend determination: Can there be this many responses? */
++#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod)                          \
++    (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
+ 
+-#define RING_PUSH_REQUESTS(_r) do {                                   \
+-    virt_wmb(); /* back sees requests /before/ updated producer index */ \
+-    (_r)->sring->req_prod = (_r)->req_prod_pvt;                       \
++#define RING_PUSH_REQUESTS(_r) do {                                     \
++    virt_wmb(); /* back sees requests /before/ updated producer index */\
++    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
+ } while (0)
+ 
+-#define RING_PUSH_RESPONSES(_r) do {                                  \
+-    virt_wmb(); /* front sees responses /before/ updated producer index */ \
+-    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                       \
++#define RING_PUSH_RESPONSES(_r) do {                                    \
++    virt_wmb(); /* front sees resps /before/ updated producer index */  \
++    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
+ } while (0)
+ 
+ /*
+@@ -250,40 +273,40 @@ struct __name##_back_ring {                     \
+  *  field appropriately.
+  */
+ 
+-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {         \
+-    RING_IDX __old = (_r)->sring->req_prod;                           \
+-    RING_IDX __new = (_r)->req_prod_pvt;                              \
+-    virt_wmb(); /* back sees requests /before/ updated producer index */ \
+-    (_r)->sring->req_prod = __new;                                    \
+-    virt_mb(); /* back sees new requests /before/ we check req_event */ \
+-    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <         \
+-               (RING_IDX)(__new - __old));                            \
++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
++    RING_IDX __old = (_r)->sring->req_prod;                             \
++    RING_IDX __new = (_r)->req_prod_pvt;                                \
++    virt_wmb(); /* back sees requests /before/ updated producer index */\
++    (_r)->sring->req_prod = __new;                                      \
++    virt_mb(); /* back sees new requests /before/ we check req_event */ \
++    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
++                 (RING_IDX)(__new - __old));                            \
+ } while (0)
+ 
+-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {        \
+-    RING_IDX __old = (_r)->sring->rsp_prod;                           \
+-    RING_IDX __new = (_r)->rsp_prod_pvt;                              \
+-    virt_wmb(); /* front sees responses /before/ updated producer index */ \
+-    (_r)->sring->rsp_prod = __new;                                    \
+-    virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
+-    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <         \
+-               (RING_IDX)(__new - __old));                            \
++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
++    RING_IDX __old = (_r)->sring->rsp_prod;                             \
++    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
++    virt_wmb(); /* front sees resps /before/ updated producer index */  \
++    (_r)->sring->rsp_prod = __new;                                      \
++    virt_mb(); /* front sees new resps /before/ we check rsp_event */   \
++    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
++                 (RING_IDX)(__new - __old));                            \
+ } while (0)
+ 
+-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {           \
+-    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                 \
+-    if (_work_to_do) break;                                           \
+-    (_r)->sring->req_event = (_r)->req_cons + 1;                      \
+-    virt_mb();                                                        \
+-    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                 \
++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
++    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
++    if (_work_to_do) break;                                             \
++    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
++    virt_mb();                                                          \
++    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
+ } while (0)
+ 
+-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {          \
+-    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                \
+-    if (_work_to_do) break;                                           \
+-    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                      \
+-    virt_mb();                                                        \
+-    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                \
++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
++    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
++    if (_work_to_do) break;                                             \
++    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
++    virt_mb();                                                          \
++    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
+ } while (0)
+ 
+ 
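To make the macro protocol above concrete, here is a minimal sketch of the backend consume loop these macros are designed for. The mytag ring type, the req_t/rsp_t typedefs, and the process_request()/notify_frontend() helpers are illustrative assumptions, not part of this patch or of ring.h:

    /* Hypothetical backend worker; assumes DEFINE_RING_TYPES(mytag, req_t, rsp_t)
     * and a ring set up with BACK_RING_INIT(&ring, sring, PAGE_SIZE). */
    static void mytag_backend_work(struct mytag_back_ring *ring)
    {
        int more, notify;

        do {
            while (RING_HAS_UNCONSUMED_REQUESTS(ring)) {
                req_t req;
                rsp_t *rsp;

                /* Copy first, so the frontend cannot change the
                 * request under us while we validate and use it. */
                RING_COPY_REQUEST(ring, ring->req_cons, &req);
                ring->req_cons++;

                rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
                ring->rsp_prod_pvt++;
                process_request(&req, rsp);     /* assumed handler */
            }
            RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
            if (notify)
                notify_frontend();              /* assumed event-channel kick */
            RING_FINAL_CHECK_FOR_REQUESTS(ring, more);
        } while (more);
    }

The RING_COPY_REQUEST() step is the one the comment above insists on: all validation happens against a snapshot the other end can no longer mutate.
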
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 28db51274ed0e..6670a44ec5d45 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -677,7 +677,7 @@ static int load_image_and_restore(void)
+               goto Unlock;
+ 
+       error = swsusp_read(&flags);
+-      swsusp_close(FMODE_READ);
++      swsusp_close(FMODE_READ | FMODE_EXCL);
+       if (!error)
+               hibernation_restore(flags & SF_PLATFORM_MODE);
+ 
+@@ -874,7 +874,7 @@ static int software_resume(void)
+       /* The snapshot device should not be opened while we're running */
+       if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+               error = -EBUSY;
+-              swsusp_close(FMODE_READ);
++              swsusp_close(FMODE_READ | FMODE_EXCL);
+               goto Unlock;
+       }
+ 
+@@ -910,7 +910,7 @@ static int software_resume(void)
+       pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
+       return error;
+  Close_Finish:
+-      swsusp_close(FMODE_READ);
++      swsusp_close(FMODE_READ | FMODE_EXCL);
+       goto Finish;
+ }
+ 
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 2c4068d8776ea..74185fb040f33 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1365,14 +1365,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
+       if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+               *tt = event_triggers_call(file, entry, event);
+ 
+-      if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+-          (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+-           !filter_match_preds(file->filter, entry))) {
+-              __trace_event_discard_commit(buffer, event);
+-              return true;
+-      }
++      if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
++                                  EVENT_FILE_FL_FILTERED |
++                                  EVENT_FILE_FL_PID_FILTER))))
++              return false;
++
++      if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
++              goto discard;
++
++      if (file->flags & EVENT_FILE_FL_FILTERED &&
++          !filter_match_preds(file->filter, entry))
++              goto discard;
++
++      if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
++          trace_event_ignore_this_pid(file))
++              goto discard;
+ 
+       return false;
++ discard:
++      __trace_event_discard_commit(buffer, event);
++      return true;
+ }
+ 
+ /**
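Distilled, the rewritten helper makes the common case a single mask test and adds the previously missing PID-filter check. A sketch of the resulting decision order (flag names and helpers are the real tracing ones; the standalone function is illustrative):

    static bool should_discard(struct trace_event_file *file, void *entry)
    {
        /* One likely() test keeps the hot path cheap. */
        if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
                                    EVENT_FILE_FL_FILTERED |
                                    EVENT_FILE_FL_PID_FILTER))))
            return false;                       /* keep the event */

        if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
            return true;
        if ((file->flags & EVENT_FILE_FL_FILTERED) &&
            !filter_match_preds(file->filter, entry))
            return true;
        if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
            trace_event_ignore_this_pid(file))
            return true;
        return false;
    }
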
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index ea43be6b9cc3c..1ca64a9296d0d 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2255,12 +2255,19 @@ static struct trace_event_file *
+ trace_create_new_event(struct trace_event_call *call,
+                      struct trace_array *tr)
+ {
++      struct trace_pid_list *pid_list;
+       struct trace_event_file *file;
+ 
+       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       if (!file)
+               return NULL;
+ 
++      pid_list = rcu_dereference_protected(tr->filtered_pids,
++                                           lockdep_is_held(&event_mutex));
++
++      if (pid_list)
++              file->flags |= EVENT_FILE_FL_PID_FILTER;
++
+       file->event_call = call;
+       file->tr = tr;
+       atomic_set(&file->sm_ref, 0);
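The companion hunk above exists because an event file created after a PID filter was installed would otherwise never get EVENT_FILE_FL_PID_FILTER set. rcu_dereference_protected() is the standard way to read an RCU pointer when the writer-side lock is already held; in isolation the pattern looks like this (the assertion line is an illustrative addition):

    struct trace_pid_list *pid_list;

    /* Holding event_mutex excludes writers, so no rcu_read_lock() is
     * needed; lockdep_is_held() lets lockdep verify that claim. */
    lockdep_assert_held(&event_mutex);
    pid_list = rcu_dereference_protected(tr->filtered_pids,
                                         lockdep_is_held(&event_mutex));
    if (pid_list)
        file->flags |= EVENT_FILE_FL_PID_FILTER;
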
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ebcf26bc4cd4b..0c5a2b4e003d5 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3425,6 +3425,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+       unsigned long sz = huge_page_size(h);
+       unsigned long mmun_start = start;       /* For mmu_notifiers */
+       unsigned long mmun_end   = end;         /* For mmu_notifiers */
++      bool force_flush = false;
+ 
+       WARN_ON(!is_vm_hugetlb_page(vma));
+       BUG_ON(start & ~huge_page_mask(h));
+@@ -3451,10 +3452,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+               ptl = huge_pte_lock(h, mm, ptep);
+               if (huge_pmd_unshare(mm, &address, ptep)) {
+                       spin_unlock(ptl);
+-                      /*
+-                       * We just unmapped a page of PMDs by clearing a PUD.
+-                       * The caller's TLB flush range should cover this area.
+-                       */
++                      tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
++                      force_flush = true;
+                       continue;
+               }
+ 
+@@ -3511,6 +3510,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+       }
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+       tlb_end_vma(tlb, vma);
++
++      /*
++       * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
++       * could defer the flush until now, since by holding i_mmap_rwsem we
++       * guaranteed that the last reference would not be dropped. But we must
++       * do the flushing before we return, as otherwise i_mmap_rwsem will be
++       * dropped and the last reference to the shared PMDs page might be
++       * dropped as well.
++       *
++       * In theory we could defer the freeing of the PMD pages as well, but
++       * huge_pmd_unshare() relies on the exact page_count for the PMD page to
++       * detect sharing, so we cannot defer the release of the page either.
++       * Instead, do the flush now.
++       */
++      if (force_flush)
++              tlb_flush_mmu_tlbonly(tlb);
+ }
+ 
+ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
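The long comment above compresses to an ordering rule: the old code assumed the caller's eventual TLB flush was enough, but the flush has to happen before i_mmap_rwsem can be dropped, so the loop records the PUD-sized range at unshare time and forces the flush on exit. Condensed sketch of the two halves of the fix:

    /* In the unmap loop: unsharing cleared a PUD entry, so remember
     * the whole PUD-sized range and note that a flush is still owed. */
    if (huge_pmd_unshare(mm, &address, ptep)) {
        spin_unlock(ptl);
        tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
        force_flush = true;
        continue;
    }

    /* At the end, before the caller can drop i_mmap_rwsem: */
    if (force_flush)
        tlb_flush_mmu_tlbonly(tlb);
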
+diff --git a/mm/memory.c b/mm/memory.c
+index 49b546cdce0d2..1d03085fde02b 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -324,6 +324,16 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
+       return false;
+ }
+ 
++void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
++                       unsigned long size)
++{
++      if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
++              tlb_flush_mmu(tlb);
++
++      tlb->page_size = PMD_SIZE;
++      tlb->start = min(tlb->start, address);
++      tlb->end = max(tlb->end, address + size);
++}
+ #endif /* HAVE_GENERIC_MMU_GATHER */
+ 
+ #ifdef CONFIG_HAVE_RCU_TABLE_FREE
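Usage sketch for the new helper: successive calls widen the gather's pending range (flushing first if a different page size was already batched), and one ranged flush then covers the union. The addresses here are illustrative:

    /* Two unshared PMD ranges accumulate into one pending flush. */
    tlb_flush_pmd_range(tlb, 0x40000000UL, PUD_SIZE);
    tlb_flush_pmd_range(tlb, 0x80000000UL, PUD_SIZE);
    /* now: tlb->start == 0x40000000, tlb->end == 0x80000000 + PUD_SIZE */
    tlb_flush_mmu_tlbonly(tlb);
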
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index 8b5ba0a5cd386..93530bd332470 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -340,8 +340,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+               return;
+ 
+       if (tcp_in_slow_start(tp)) {
+-              if (hystart && after(ack, ca->end_seq))
+-                      bictcp_hystart_reset(sk);
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
+@@ -383,6 +381,9 @@ static void hystart_update(struct sock *sk, u32 delay)
+       if (ca->found & hystart_detect)
+               return;
+ 
++      if (after(tp->snd_una, ca->end_seq))
++              bictcp_hystart_reset(sk);
++
+       if (hystart_detect & HYSTART_ACK_TRAIN) {
+               u32 now = bictcp_clock();
+ 
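The moved reset keys off tp->snd_una instead of the ack argument, so a HyStart round now ends based on cumulatively acknowledged data even on paths where bictcp_cong_avoid() returns early. after() is the kernel's wraparound-safe sequence comparison; its shape, for reference:

    /* Equivalent of the kernel's after(a, b) for 32-bit sequence space:
     * true when a is later than b, correct across wraparound. */
    static inline bool seq32_after(u32 a, u32 b)
    {
        return (s32)(a - b) > 0;
    }
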
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index fc36f3b0dceb3..251ec12517e93 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -175,7 +175,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
+ #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+       /* Policy lookup after SNAT yielded a new policy */
+       if (skb_dst(skb)->xfrm) {
+-              IPCB(skb)->flags |= IPSKB_REROUTED;
++              IP6CB(skb)->flags |= IP6SKB_REROUTED;
+               return dst_output(net, sk, skb);
+       }
+ #endif
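The one-liner above fixes a copy-paste bug the compiler could not catch: both control-block macros are bare casts of skb->cb, so using the IPv4 macro in IPv6 code compiles cleanly while writing the wrong flag word. The kernel definitions, side by side:

    /* From include/net/ip.h and include/linux/ipv6.h: both overlay the
     * same skb->cb scratch area, with no type checking between them. */
    #define IPCB(skb)  ((struct inet_skb_parm *)((skb)->cb))
    #define IP6CB(skb) ((struct inet6_skb_parm *)((skb)->cb))
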
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index acaeeaf814415..f20b08db9fe91 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1850,7 +1850,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+       struct ip_vs_proto_data *pd;
+       struct ip_vs_conn *cp;
+       int ret, pkts;
+-      int conn_reuse_mode;
+       struct sock *sk;
+ 
+       /* Already marked as IPVS request or reply? */
+@@ -1926,15 +1925,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+        */
+       cp = pp->conn_in_get(ipvs, af, skb, &iph);
+ 
+-      conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+-      if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++      if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++              int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+               bool old_ct = false, resched = false;
+ 
+               if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+                   unlikely(!atomic_read(&cp->dest->weight))) {
+                       resched = true;
+                       old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
+-              } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
++              } else if (conn_reuse_mode &&
++                         is_new_conn_expected(cp, conn_reuse_mode)) {
+                       old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
+                       if (!atomic_read(&cp->n_control)) {
+                               resched = true;
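This reordering makes expire_nodest_conn rescheduling independent of conn_reuse_mode, which is exactly what the ipvs-sysctl.txt hunk at the top of this patch documents (setting the mode to 0 no longer disables expire_nodest_conn). The resulting decision order, condensed (dest_is_dead() is an illustrative stand-in for the weight/dest checks in the hunk):

    /* Condensed: the dead-destination branch no longer depends on
     * conn_reuse_mode; only the port-reuse branch does. */
    if (sysctl_expire_nodest_conn(ipvs) && dest_is_dead(cp))
        resched = true;                          /* unconditional */
    else if (conn_reuse_mode && is_new_conn_expected(cp, conn_reuse_mode))
        resched = !atomic_read(&cp->n_control);  /* mode-gated */
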
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 1008bbbb3af9c..0e0dff72a9e4f 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -485,6 +485,11 @@ static int nci_open_device(struct nci_dev *ndev)
+ 
+       mutex_lock(&ndev->req_lock);
+ 
++      if (test_bit(NCI_UNREG, &ndev->flags)) {
++              rc = -ENODEV;
++              goto done;
++      }
++
+       if (test_bit(NCI_UP, &ndev->flags)) {
+               rc = -EALREADY;
+               goto done;
+@@ -548,6 +553,10 @@ done:
+ static int nci_close_device(struct nci_dev *ndev)
+ {
+       nci_req_cancel(ndev, ENODEV);
++
++      /* This mutex needs to be held as a barrier for
++       * the caller, nci_unregister_device().
++       */
+       mutex_lock(&ndev->req_lock);
+ 
+       if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+@@ -585,8 +594,8 @@ static int nci_close_device(struct nci_dev *ndev)
+       /* Flush cmd wq */
+       flush_workqueue(ndev->cmd_wq);
+ 
+-      /* Clear flags */
+-      ndev->flags = 0;
++      /* Clear flags except NCI_UNREG */
++      ndev->flags &= BIT(NCI_UNREG);
+ 
+       mutex_unlock(&ndev->req_lock);
+ 
+@@ -1268,6 +1277,12 @@ void nci_unregister_device(struct nci_dev *ndev)
+ {
+       struct nci_conn_info    *conn_info, *n;
+ 
++      /* This set_bit() is not protected by a dedicated barrier;
++       * however, that is fine because the mutex_lock(&ndev->req_lock)
++       * in nci_close_device() provides one.
++       */
++      set_bit(NCI_UNREG, &ndev->flags);
++
+       nci_close_device(ndev);
+ 
+       destroy_workqueue(ndev->cmd_wq);
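Putting the three NCI hunks together: the unregister path publishes NCI_UNREG before closing, every open re-checks the bit under the same req_lock that close takes, and close now preserves the bit when clearing flags. The guard, in isolation:

    /* In nci_unregister_device(): publish the bit, then close; the
     * mutex acquire/release in nci_close_device() orders the store. */
    set_bit(NCI_UNREG, &ndev->flags);
    nci_close_device(ndev);

    /* In nci_open_device(): reject opens racing with unregister. */
    mutex_lock(&ndev->req_lock);
    if (test_bit(NCI_UNREG, &ndev->flags)) {
        rc = -ENODEV;
        goto done;
    }
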
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 9aab4ab8161bd..4c904ab29e0e6 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1589,8 +1589,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
+ static int smc_shutdown(struct socket *sock, int how)
+ {
+       struct sock *sk = sock->sk;
++      bool do_shutdown = true;
+       struct smc_sock *smc;
+       int rc = -EINVAL;
++      int old_state;
+       int rc1 = 0;
+ 
+       smc = smc_sk(sk);
+@@ -1617,7 +1619,11 @@ static int smc_shutdown(struct socket *sock, int how)
+       }
+       switch (how) {
+       case SHUT_RDWR:         /* shutdown in both directions */
++              old_state = sk->sk_state;
+               rc = smc_close_active(smc);
++              if (old_state == SMC_ACTIVE &&
++                  sk->sk_state == SMC_PEERCLOSEWAIT1)
++                      do_shutdown = false;
+               break;
+       case SHUT_WR:
+               rc = smc_close_shutdown_write(smc);
+@@ -1627,7 +1633,7 @@ static int smc_shutdown(struct socket *sock, int how)
+               /* nothing more to do because peer is not involved */
+               break;
+       }
+-      if (smc->clcsock)
++      if (do_shutdown && smc->clcsock)
+               rc1 = kernel_sock_shutdown(smc->clcsock, how);
+       /* map sock_shutdown_cmd constants to sk_shutdown value range */
+       sk->sk_shutdown |= how + 1;
+diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
+index ea2b87f294696..e25c023582f9e 100644
+--- a/net/smc/smc_close.c
++++ b/net/smc/smc_close.c
+@@ -202,6 +202,12 @@ again:
+                       if (rc)
+                               break;
+                       sk->sk_state = SMC_PEERCLOSEWAIT1;
++
++                      /* actively shut down the clcsock before the peer
++                       * closes it, to keep the peer out of TIME_WAIT.
++                       */
++                      if (smc->clcsock && smc->clcsock->sk)
++                              rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+               } else {
+                       /* peer event has changed the state */
+                       goto again;
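The two SMC hunks cooperate: smc_close_active() now shuts the clcsock down itself when the socket enters SMC_PEERCLOSEWAIT1, so our side sends the TCP FIN first and the peer avoids TIME_WAIT, and smc_shutdown() detects that transition and skips its own kernel_sock_shutdown() so the work is not done twice. Condensed from the hunks above:

    /* In smc_shutdown(), SHUT_RDWR case (sketch): */
    old_state = sk->sk_state;
    rc = smc_close_active(smc);     /* may enter SMC_PEERCLOSEWAIT1 and
                                     * shut the clcsock down internally */
    if (old_state == SMC_ACTIVE && sk->sk_state == SMC_PEERCLOSEWAIT1)
        do_shutdown = false;        /* clcsock already shut down */
    /* ... */
    if (do_shutdown && smc->clcsock)
        rc1 = kernel_sock_shutdown(smc->clcsock, how);
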
+diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
+index 5fcbb065d8702..d32685ce6c059 100644
+--- a/sound/pci/ctxfi/ctamixer.c
++++ b/sound/pci/ctxfi/ctamixer.c
+@@ -27,16 +27,15 @@
+ 
+ #define BLANK_SLOT            4094
+ 
+-static int amixer_master(struct rsc *rsc)
++static void amixer_master(struct rsc *rsc)
+ {
+       rsc->conj = 0;
+-      return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
++      rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
+ }
+ 
+-static int amixer_next_conj(struct rsc *rsc)
++static void amixer_next_conj(struct rsc *rsc)
+ {
+       rsc->conj++;
+-      return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
+ }
+ 
+ static int amixer_index(const struct rsc *rsc)
+@@ -335,16 +334,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
+ 
+ /* SUM resource management */
+ 
+-static int sum_master(struct rsc *rsc)
++static void sum_master(struct rsc *rsc)
+ {
+       rsc->conj = 0;
+-      return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
++      rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
+ }
+ 
+-static int sum_next_conj(struct rsc *rsc)
++static void sum_next_conj(struct rsc *rsc)
+ {
+       rsc->conj++;
+-      return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
+ }
+ 
+ static int sum_index(const struct rsc *rsc)
+diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
+index f35a7341e4463..ed6e15d1f10f4 100644
+--- a/sound/pci/ctxfi/ctdaio.c
++++ b/sound/pci/ctxfi/ctdaio.c
+@@ -55,12 +55,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
+       [SPDIFIO] = {.left = 0x05, .right = 0x85},
+ };
+ 
+-static int daio_master(struct rsc *rsc)
++static void daio_master(struct rsc *rsc)
+ {
+       /* Actually, this is not the resource index of DAIO.
+        * For DAO, it is the input mapper index. And, for DAI,
+        * it is the output time-slot index. */
+-      return rsc->conj = rsc->idx;
++      rsc->conj = rsc->idx;
+ }
+ 
+ static int daio_index(const struct rsc *rsc)
+@@ -68,19 +68,19 @@ static int daio_index(const struct rsc *rsc)
+       return rsc->conj;
+ }
+ 
+-static int daio_out_next_conj(struct rsc *rsc)
++static void daio_out_next_conj(struct rsc *rsc)
+ {
+-      return rsc->conj += 2;
++      rsc->conj += 2;
+ }
+ 
+-static int daio_in_next_conj_20k1(struct rsc *rsc)
++static void daio_in_next_conj_20k1(struct rsc *rsc)
+ {
+-      return rsc->conj += 0x200;
++      rsc->conj += 0x200;
+ }
+ 
+-static int daio_in_next_conj_20k2(struct rsc *rsc)
++static void daio_in_next_conj_20k2(struct rsc *rsc)
+ {
+-      return rsc->conj += 0x100;
++      rsc->conj += 0x100;
+ }
+ 
+ static const struct rsc_ops daio_out_rsc_ops = {
+diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
+index 80c4d84f9667f..f05a09ed42b8d 100644
+--- a/sound/pci/ctxfi/ctresource.c
++++ b/sound/pci/ctxfi/ctresource.c
+@@ -113,18 +113,17 @@ static int audio_ring_slot(const struct rsc *rsc)
+     return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
+ }
+ 
+-static int rsc_next_conj(struct rsc *rsc)
++static void rsc_next_conj(struct rsc *rsc)
+ {
+       unsigned int i;
+       for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
+               i++;
+       rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
+-      return rsc->conj;
+ }
+ 
+-static int rsc_master(struct rsc *rsc)
++static void rsc_master(struct rsc *rsc)
+ {
+-      return rsc->conj = rsc->idx;
++      rsc->conj = rsc->idx;
+ }
+ 
+ static const struct rsc_ops rsc_generic_ops = {
+diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
+index 736d9f7e9e165..29b6fe6de659c 100644
+--- a/sound/pci/ctxfi/ctresource.h
++++ b/sound/pci/ctxfi/ctresource.h
+@@ -43,8 +43,8 @@ struct rsc {
+ };
+ 
+ struct rsc_ops {
+-      int (*master)(struct rsc *rsc); /* Move to master resource */
+-      int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
++      void (*master)(struct rsc *rsc); /* Move to master resource */
++      void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
+       int (*index)(const struct rsc *rsc); /* Return the index of resource */
+       /* Return the output slot number */
+       int (*output_slot)(const struct rsc *rsc);
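The int-to-void conversions in ctamixer.c, ctdaio.c, ctresource.c and (below) ctsrc.c all fix the same pattern: next_conj() used to return idx[rsc->conj] after incrementing, reading one slot past the array on the final step, even though no caller ever used that value. With movement and lookup separated, the caller pattern becomes (sketch; program_slot() is an illustrative stand-in for the hardware programming these drivers do):

    int i;

    rsc->ops->master(rsc);                  /* rewind to the master slot */
    for (i = 0; i < rsc->msr; i++) {
        program_slot(rsc->ops->index(rsc)); /* bounded lookup */
        rsc->ops->next_conj(rsc);           /* movement only, no return */
    }
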
+diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
+index a4fc10723fc6b..660ca0272e395 100644
+--- a/sound/pci/ctxfi/ctsrc.c
++++ b/sound/pci/ctxfi/ctsrc.c
+@@ -594,16 +594,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
+ 
+ /* SRCIMP resource manager operations */
+ 
+-static int srcimp_master(struct rsc *rsc)
++static void srcimp_master(struct rsc *rsc)
+ {
+       rsc->conj = 0;
+-      return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
++      rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
+ }
+ 
+-static int srcimp_next_conj(struct rsc *rsc)
++static void srcimp_next_conj(struct rsc *rsc)
+ {
+       rsc->conj++;
+-      return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
+ }
+ 
+ static int srcimp_index(const struct rsc *rsc)
+diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
+index 44eee18c658ae..7d2c5de380317 100644
+--- a/sound/soc/qcom/qdsp6/q6routing.c
++++ b/sound/soc/qcom/qdsp6/q6routing.c
+@@ -443,7 +443,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+               session->port_id = be_id;
+               snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
+       } else {
+-              session->port_id = -1;
++              if (session->port_id == be_id) {
++                      session->port_id = -1;
++                      return 0;
++              }
++
+               snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
+       }
+ 
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 2c6598e07dde3..ccf6dd9411975 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -2565,6 +2565,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
+ /* remove dynamic controls from the component driver */
+ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
+ {
++      struct snd_card *card = comp->card->snd_card;
+       struct snd_soc_dobj *dobj, *next_dobj;
+       int pass = SOC_TPLG_PASS_END;
+ 
+@@ -2572,6 +2573,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
+       while (pass >= SOC_TPLG_PASS_START) {
+ 
+               /* remove mixer controls */
++              down_write(&card->controls_rwsem);
+               list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
+                       list) {
+ 
+@@ -2605,6 +2607,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
+                               break;
+                       }
+               }
++              up_write(&card->controls_rwsem);
+               pass--;
+       }
+ 
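For the hunk above: snd_ctl readers walk the card's control list under controls_rwsem, so removing topology-created controls without write-holding it can race with concurrent ioctl readers; the fix brackets each removal pass. The locking pattern in isolation (remove_if_matching() is an illustrative stand-in for the per-type removal the loop body performs):

    down_write(&card->controls_rwsem);      /* exclude snd_ctl readers */
    list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list, list)
        remove_if_matching(dobj, index, pass);
    up_write(&card->controls_rwsem);
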
