commit:     a15baa1cdaa74379d95243035410d3a16ea473ff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 20 12:00:09 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 20 12:00:09 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a15baa1c

Linux patch 5.19.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1009_linux-5.19.10.patch | 1743 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1747 insertions(+)

diff --git a/0000_README b/0000_README
index 341e7dca..e710df97 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-5.19.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.19.9
 
+Patch:  1009_linux-5.19.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.19.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.19.10.patch b/1009_linux-5.19.10.patch
new file mode 100644
index 00000000..ded561b4
--- /dev/null
+++ b/1009_linux-5.19.10.patch
@@ -0,0 +1,1743 @@
+diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
+index b6bbc312a7cf7..1414ba9977c16 100644
+--- a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
++++ b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
+@@ -24,8 +24,10 @@ properties:
+ 
+   interrupts:
+     minItems: 1
++    maxItems: 2
+     description:
+       Should be configured with type IRQ_TYPE_EDGE_RISING.
++      If two interrupts are provided, expected order is INT1 and INT2.
+ 
+ required:
+   - compatible
+diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst
+index f615906a0821b..6d721396717a2 100644
+--- a/Documentation/input/joydev/joystick.rst
++++ b/Documentation/input/joydev/joystick.rst
+@@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes:
+ * AVB Mag Turbo Force
+ * AVB Top Shot Pegasus
+ * AVB Top Shot Force Feedback Racing Wheel
++* Boeder Force Feedback Wheel
+ * Logitech WingMan Force
+ * Logitech WingMan Force Wheel
+ * Guillemot Race Leader Force Feedback
+diff --git a/Makefile b/Makefile
+index 1f27c4bd09e67..33a9b6b547c47 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 19
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Superb Owl
+ 
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 62b5b07fa4e1c..ca64bf5f5b038 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -36,6 +36,7 @@ config LOONGARCH
+       select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+       select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
++      select ARCH_KEEP_MEMBLOCK
+       select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_SPARSEMEM_ENABLE
+diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
+index 62044cd5b7bc5..825c2519b9d1f 100644
+--- a/arch/loongarch/include/asm/acpi.h
++++ b/arch/loongarch/include/asm/acpi.h
+@@ -15,7 +15,7 @@ extern int acpi_pci_disabled;
+ extern int acpi_noirq;
+ 
+ #define acpi_os_ioremap acpi_os_ioremap
+-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
++void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+ 
+ static inline void disable_acpi(void)
+ {
+diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
+index bb729ee8a2370..796a24055a942 100644
+--- a/arch/loongarch/kernel/acpi.c
++++ b/arch/loongarch/kernel/acpi.c
+@@ -113,7 +113,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+       early_memunmap(map, size);
+ }
+ 
+-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
++void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+ {
+       if (!memblock_is_memory(phys))
+               return ioremap(phys, size);
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index 7094a68c9b832..3c3fbff0b8f86 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -131,18 +131,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
+       return ret;
+ }
+ 
+-#ifdef CONFIG_NUMA
+-int memory_add_physaddr_to_nid(u64 start)
+-{
+-      int nid;
+-
+-      nid = pa_to_nid(start);
+-      return nid;
+-}
+-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+-#endif
+-
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+ {
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+@@ -154,6 +142,16 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+               page += vmem_altmap_offset(altmap);
+       __remove_pages(start_pfn, nr_pages, altmap);
+ }
++
++#ifdef CONFIG_NUMA
++int memory_add_physaddr_to_nid(u64 start)
++{
++      int nid;
++
++      nid = pa_to_nid(start);
++      return nid;
++}
++EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+ #endif
+ #endif
+ 
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 356226c7ebbdc..aa1ba803659cd 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5907,47 +5907,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+                                     const struct kvm_memory_slot *memslot,
+                                     int start_level)
+ {
+-      bool flush = false;
+-
+       if (kvm_memslots_have_rmaps(kvm)) {
+               write_lock(&kvm->mmu_lock);
+-              flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+-                                        start_level, KVM_MAX_HUGEPAGE_LEVEL,
+-                                        false);
++              slot_handle_level(kvm, memslot, slot_rmap_write_protect,
++                                start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+               write_unlock(&kvm->mmu_lock);
+       }
+ 
+       if (is_tdp_mmu_enabled(kvm)) {
+               read_lock(&kvm->mmu_lock);
+-              flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
++              kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
+               read_unlock(&kvm->mmu_lock);
+       }
+-
+-      /*
+-       * Flush TLBs if any SPTEs had to be write-protected to ensure that
+-       * guest writes are reflected in the dirty bitmap before the memslot
+-       * update completes, i.e. before enabling dirty logging is visible to
+-       * userspace.
+-       *
+-       * Perform the TLB flush outside the mmu_lock to reduce the amount of
+-       * time the lock is held. However, this does mean that another CPU can
+-       * now grab mmu_lock and encounter a write-protected SPTE while CPUs
+-       * still have a writable mapping for the associated GFN in their TLB.
+-       *
+-       * This is safe but requires KVM to be careful when making decisions
+-       * based on the write-protection status of an SPTE. Specifically, KVM
+-       * also write-protects SPTEs to monitor changes to guest page tables
+-       * during shadow paging, and must guarantee no CPUs can write to those
+-       * page before the lock is dropped. As mentioned in the previous
+-       * paragraph, a write-protected SPTE is no guarantee that CPU cannot
+-       * perform writes. So to determine if a TLB flush is truly required, KVM
+-       * will clear a separate software-only bit (MMU-writable) and skip the
+-       * flush if-and-only-if this bit was already clear.
+-       *
+-       * See is_writable_pte() for more details.
+-       */
+-      if (flush)
+-              kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+ }
+ 
+ /* Must be called with the mmu_lock held in write-mode. */
+@@ -6070,32 +6041,30 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+                                  const struct kvm_memory_slot *memslot)
+ {
+-      bool flush = false;
+-
+       if (kvm_memslots_have_rmaps(kvm)) {
+               write_lock(&kvm->mmu_lock);
+               /*
+                * Clear dirty bits only on 4k SPTEs since the legacy MMU only
+                * support dirty logging at a 4k granularity.
+                */
+-              flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
++              slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
+               write_unlock(&kvm->mmu_lock);
+       }
+ 
+       if (is_tdp_mmu_enabled(kvm)) {
+               read_lock(&kvm->mmu_lock);
+-              flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
++              kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
+               read_unlock(&kvm->mmu_lock);
+       }
+ 
+       /*
++       * The caller will flush the TLBs after this function returns.
++       *
+        * It's also safe to flush TLBs out of mmu lock here as currently this
+        * function is only used for dirty logging, in which case flushing TLB
+        * out of mmu lock also guarantees no dirty pages will be lost in
+        * dirty_bitmap.
+        */
+-      if (flush)
+-              kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+ }
+ 
+ void kvm_mmu_zap_all(struct kvm *kvm)
+diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
+index f80dbb628df57..e09bdcf1e47c5 100644
+--- a/arch/x86/kvm/mmu/spte.h
++++ b/arch/x86/kvm/mmu/spte.h
+@@ -326,7 +326,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+ }
+ 
+ /*
+- * An shadow-present leaf SPTE may be non-writable for 3 possible reasons:
++ * A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
+  *
+  *  1. To intercept writes for dirty logging. KVM write-protects huge pages
+  *     so that they can be split be split down into the dirty logging
+@@ -344,8 +344,13 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+  *     read-only memslot or guest memory backed by a read-only VMA. Writes to
+  *     such pages are disallowed entirely.
+  *
+- * To keep track of why a given SPTE is write-protected, KVM uses 2
+- * software-only bits in the SPTE:
++ *  4. To emulate the Accessed bit for SPTEs without A/D bits.  Note, in this
++ *     case, the SPTE is access-protected, not just write-protected!
++ *
++ * For cases #1 and #4, KVM can safely make such SPTEs writable without taking
++ * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it.
++ * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits
++ * in the SPTE:
+  *
+  *  shadow_mmu_writable_mask, aka MMU-writable -
+  *    Cleared on SPTEs that KVM is currently write-protecting for shadow paging
+@@ -374,7 +379,8 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+  * shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging
+  * (which does not clear the MMU-writable bit), does not flush TLBs before
+  * dropping the lock, as it only needs to synchronize guest writes with the
+- * dirty bitmap.
++ * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for
++ * access-tracking via the clear_young() MMU notifier also does not flush TLBs.
+  *
+  * So, there is the problem: clearing the MMU-writable bit can encounter a
+  * write-protected SPTE while CPUs still have writable mappings for that SPTE
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 55de0d1981e52..5b36866528568 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -12265,6 +12265,50 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
+               } else {
+                       kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
+               }
++
++              /*
++               * Unconditionally flush the TLBs after enabling dirty logging.
++               * A flush is almost always going to be necessary (see below),
++               * and unconditionally flushing allows the helpers to omit
++               * the subtly complex checks when removing write access.
++               *
++               * Do the flush outside of mmu_lock to reduce the amount of
++               * time mmu_lock is held.  Flushing after dropping mmu_lock is
++               * safe as KVM only needs to guarantee the slot is fully
++               * write-protected before returning to userspace, i.e. before
++               * userspace can consume the dirty status.
++               *
++               * Flushing outside of mmu_lock requires KVM to be careful when
++               * making decisions based on writable status of an SPTE, e.g. a
++               * !writable SPTE doesn't guarantee a CPU can't perform writes.
++               *
++               * Specifically, KVM also write-protects guest page tables to
++               * monitor changes when using shadow paging, and must guarantee
++               * no CPUs can write to those page before mmu_lock is dropped.
++               * Because CPUs may have stale TLB entries at this point, a
++               * !writable SPTE doesn't guarantee CPUs can't perform writes.
++               *
++               * KVM also allows making SPTES writable outside of mmu_lock,
++               * e.g. to allow dirty logging without taking mmu_lock.
++               *
++               * To handle these scenarios, KVM uses a separate software-only
++               * bit (MMU-writable) to track if a SPTE is !writable due to
++               * a guest page table being write-protected (KVM clears the
++               * MMU-writable flag when write-protecting for shadow paging).
++               *
++               * The use of MMU-writable is also the primary motivation for
++               * the unconditional flush.  Because KVM must guarantee that a
++               * CPU doesn't contain stale, writable TLB entries for a
++               * !MMU-writable SPTE, KVM must flush if it encounters any
++               * MMU-writable SPTE regardless of whether the actual hardware
++               * writable bit was set.  I.e. KVM is almost guaranteed to need
++               * to flush, while unconditionally flushing allows the "remove
++               * write access" helpers to ignore MMU-writable entirely.
++               *
++               * See is_writable_pte() for more details (the case involving
++               * access-tracked SPTEs is particularly relevant).
++               */
++              kvm_arch_flush_remote_tlbs_memslot(kvm, new);
+       }
+ }
+ 
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index c2d4947844250..510cdec375c4d 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -416,6 +416,16 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+ {
+       int i;
+ 
++#ifdef CONFIG_X86
++      /*
++       * IRQ override isn't needed on modern AMD Zen systems and
++       * this override breaks active low IRQs on AMD Ryzen 6000 and
++       * newer systems. Skip it.
++       */
++      if (boot_cpu_has(X86_FEATURE_ZEN))
++              return false;
++#endif
++
+       for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
+               const struct irq_override_cmp *entry = &skip_override_table[i];
+ 
+diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
+index f118ad9bcd33d..0e95351d47d49 100644
+--- a/drivers/gpio/gpio-104-dio-48e.c
++++ b/drivers/gpio/gpio-104-dio-48e.c
+@@ -271,6 +271,7 @@ static void dio48e_irq_mask(struct irq_data *data)
+               dio48egpio->irq_mask &= ~BIT(0);
+       else
+               dio48egpio->irq_mask &= ~BIT(1);
++      gpiochip_disable_irq(chip, offset);
+ 
+       if (!dio48egpio->irq_mask)
+               /* disable interrupts */
+@@ -298,6 +299,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
+               iowrite8(0x00, dio48egpio->base + 0xB);
+       }
+ 
++      gpiochip_enable_irq(chip, offset);
+       if (offset == 19)
+               dio48egpio->irq_mask |= BIT(0);
+       else
+@@ -320,12 +322,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
+       return 0;
+ }
+ 
+-static struct irq_chip dio48e_irqchip = {
++static const struct irq_chip dio48e_irqchip = {
+       .name = "104-dio-48e",
+       .irq_ack = dio48e_irq_ack,
+       .irq_mask = dio48e_irq_mask,
+       .irq_unmask = dio48e_irq_unmask,
+-      .irq_set_type = dio48e_irq_set_type
++      .irq_set_type = dio48e_irq_set_type,
++      .flags = IRQCHIP_IMMUTABLE,
++      GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+ 
+ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
+@@ -414,7 +418,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
+       dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
+ 
+       girq = &dio48egpio->chip.irq;
+-      girq->chip = &dio48e_irqchip;
++      gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
+       /* This will let us handle the parent IRQ in the driver */
+       girq->parent_handler = NULL;
+       girq->num_parents = 0;
+diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
+index 45f7ad8573e19..a8b7c8eafac5a 100644
+--- a/drivers/gpio/gpio-104-idio-16.c
++++ b/drivers/gpio/gpio-104-idio-16.c
+@@ -150,10 +150,11 @@ static void idio_16_irq_mask(struct irq_data *data)
+ {
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
+-      const unsigned long mask = BIT(irqd_to_hwirq(data));
++      const unsigned long offset = irqd_to_hwirq(data);
+       unsigned long flags;
+ 
+-      idio16gpio->irq_mask &= ~mask;
++      idio16gpio->irq_mask &= ~BIT(offset);
++      gpiochip_disable_irq(chip, offset);
+ 
+       if (!idio16gpio->irq_mask) {
+               raw_spin_lock_irqsave(&idio16gpio->lock, flags);
+@@ -168,11 +169,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
+ {
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
+-      const unsigned long mask = BIT(irqd_to_hwirq(data));
++      const unsigned long offset = irqd_to_hwirq(data);
+       const unsigned long prev_irq_mask = idio16gpio->irq_mask;
+       unsigned long flags;
+ 
+-      idio16gpio->irq_mask |= mask;
++      gpiochip_enable_irq(chip, offset);
++      idio16gpio->irq_mask |= BIT(offset);
+ 
+       if (!prev_irq_mask) {
+               raw_spin_lock_irqsave(&idio16gpio->lock, flags);
+@@ -193,12 +195,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
+       return 0;
+ }
+ 
+-static struct irq_chip idio_16_irqchip = {
++static const struct irq_chip idio_16_irqchip = {
+       .name = "104-idio-16",
+       .irq_ack = idio_16_irq_ack,
+       .irq_mask = idio_16_irq_mask,
+       .irq_unmask = idio_16_irq_unmask,
+-      .irq_set_type = idio_16_irq_set_type
++      .irq_set_type = idio_16_irq_set_type,
++      .flags = IRQCHIP_IMMUTABLE,
++      GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+ 
+ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
+@@ -275,7 +279,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
+       idio16gpio->out_state = 0xFFFF;
+ 
+       girq = &idio16gpio->chip.irq;
+-      girq->chip = &idio_16_irqchip;
++      gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
+       /* This will let us handle the parent IRQ in the driver */
+       girq->parent_handler = NULL;
+       girq->num_parents = 0;
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index 8943cea927642..a2e505a7545cd 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
+       }
+ }
+ 
++static void gpio_mockup_debugfs_cleanup(void *data)
++{
++      struct gpio_mockup_chip *chip = data;
++
++      debugfs_remove_recursive(chip->dbg_dir);
++}
++
+ static void gpio_mockup_dispose_mappings(void *data)
+ {
+       struct gpio_mockup_chip *chip = data;
+@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
+ 
+       gpio_mockup_debugfs_setup(dev, chip);
+ 
+-      return 0;
++      return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
+ }
+ 
+ static const struct of_device_id gpio_mockup_of_match[] = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+index ecada5eadfe35..e325150879df7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+               return true;
+       case CHIP_SIENNA_CICHLID:
+               if (strnstr(atom_ctx->vbios_version, "D603",
++                  sizeof(atom_ctx->vbios_version))) {
++                      if (strnstr(atom_ctx->vbios_version, "D603GLXE",
+                           sizeof(atom_ctx->vbios_version)))
+-                      return true;
+-              else
++                              return false;
++                      else
++                              return true;
++              } else {
+                       return false;
++              }
+       default:
+               return false;
+       }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 2b00f8fe15a89..b19bf0c3f3737 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -2372,7 +2372,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
+ static bool fw_load_skip_check(struct psp_context *psp,
+                              struct amdgpu_firmware_info *ucode)
+ {
+-      if (!ucode->fw)
++      if (!ucode->fw || !ucode->ucode_size)
+               return true;
+ 
+       if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 9cde13b07dd26..d9a5209aa8433 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -382,11 +382,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
+       return 0;
+ }
+ 
++static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
++                                           void **table,
++                                           uint32_t *size)
++{
++      struct smu_table_context *smu_table = &smu->smu_table;
++      void *combo_pptable = smu_table->combo_pptable;
++      int ret = 0;
++
++      ret = smu_cmn_get_combo_pptable(smu);
++      if (ret)
++              return ret;
++
++      *table = combo_pptable;
++      *size = sizeof(struct smu_13_0_7_powerplay_table);
++
++      return 0;
++}
+ 
+ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
+ {
+       struct smu_table_context *smu_table = &smu->smu_table;
+-      void *combo_pptable = smu_table->combo_pptable;
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+ 
+@@ -395,18 +411,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
+        * be used directly by driver. To get the raw pptable, we need to
+        * rely on the combo pptable(and its revelant SMU message).
+        */
+-      if (adev->scpm_enabled) {
+-              ret = smu_cmn_get_combo_pptable(smu);
+-              if (ret)
+-                      return ret;
+-
+-              smu->smu_table.power_play_table = combo_pptable;
+-              smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
+-      } else {
+-              ret = smu_v13_0_setup_pptable(smu);
+-              if (ret)
+-                      return ret;
+-      }
++      ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
++                                              &smu_table->power_play_table,
++                                              &smu_table->power_play_table_size);
++      if (ret)
++              return ret;
+ 
+       ret = smu_v13_0_7_store_powerplay_table(smu);
+       if (ret)
+diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
+index a92ffde53f0b3..db2f847c8535f 100644
+--- a/drivers/gpu/drm/msm/msm_rd.c
++++ b/drivers/gpu/drm/msm/msm_rd.c
+@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
+       file->private_data = rd;
+       rd->open = true;
+ 
++      /* Reset fifo to clear any previously unread data: */
++      rd->fifo.head = rd->fifo.tail = 0;
++
+       /* the parsing tools need to know gpu-id to know which
+        * register database to load.
+        *
+diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
+index 6a5cc11aefd89..35dddc5015b37 100644
+--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
++++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
+@@ -105,7 +105,7 @@ struct report_list {
+  * @multi_packet_cnt: Count of fragmented packet count
+  *
+  * This structure is used to store completion flags and per client data like
+- * like report description, number of HID devices etc.
++ * report description, number of HID devices etc.
+  */
+ struct ishtp_cl_data {
+       /* completion flags */
+diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
+index 405e0d5212cc8..df0a825694f52 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/client.c
++++ b/drivers/hid/intel-ish-hid/ishtp/client.c
+@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
+ }
+ 
+ /**
+- * ipc_tx_callback() - IPC tx callback function
++ * ipc_tx_send() - IPC tx send function
+  * @prm: Pointer to client device instance
+  *
+- * Send message over IPC either first time or on callback on previous message
+- * completion
++ * Send message over IPC. Message will be split into fragments
++ * if message size is bigger than IPC FIFO size, and all
++ * fragments will be sent one by one.
+  */
+-static void ipc_tx_callback(void *prm)
++static void ipc_tx_send(void *prm)
+ {
+       struct ishtp_cl *cl = prm;
+       struct ishtp_cl_tx_ring *cl_msg;
+@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
+                           list);
+       rem = cl_msg->send_buf.size - cl->tx_offs;
+ 
+-      ishtp_hdr.host_addr = cl->host_client_id;
+-      ishtp_hdr.fw_addr = cl->fw_client_id;
+-      ishtp_hdr.reserved = 0;
+-      pmsg = cl_msg->send_buf.data + cl->tx_offs;
++      while (rem > 0) {
++              ishtp_hdr.host_addr = cl->host_client_id;
++              ishtp_hdr.fw_addr = cl->fw_client_id;
++              ishtp_hdr.reserved = 0;
++              pmsg = cl_msg->send_buf.data + cl->tx_offs;
++
++              if (rem <= dev->mtu) {
++                      /* Last fragment or only one packet */
++                      ishtp_hdr.length = rem;
++                      ishtp_hdr.msg_complete = 1;
++                      /* Submit to IPC queue with no callback */
++                      ishtp_write_message(dev, &ishtp_hdr, pmsg);
++                      cl->tx_offs = 0;
++                      cl->sending = 0;
+ 
+-      if (rem <= dev->mtu) {
+-              ishtp_hdr.length = rem;
+-              ishtp_hdr.msg_complete = 1;
+-              cl->sending = 0;
+-              list_del_init(&cl_msg->list);   /* Must be before write */
+-              spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+-              /* Submit to IPC queue with no callback */
+-              ishtp_write_message(dev, &ishtp_hdr, pmsg);
+-              spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+-              list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+-              ++cl->tx_ring_free_size;
+-              spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+-                      tx_free_flags);
+-      } else {
+-              /* Send IPC fragment */
+-              spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+-              cl->tx_offs += dev->mtu;
+-              ishtp_hdr.length = dev->mtu;
+-              ishtp_hdr.msg_complete = 0;
+-              ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
++                      break;
++              } else {
++                      /* Send ipc fragment */
++                      ishtp_hdr.length = dev->mtu;
++                      ishtp_hdr.msg_complete = 0;
++                      /* All fregments submitted to IPC queue with no callback */
++                      ishtp_write_message(dev, &ishtp_hdr, pmsg);
++                      cl->tx_offs += dev->mtu;
++                      rem = cl_msg->send_buf.size - cl->tx_offs;
++              }
+       }
++
++      list_del_init(&cl_msg->list);
++      spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
++
++      spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
++      list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
++      ++cl->tx_ring_free_size;
++      spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
++              tx_free_flags);
+ }
+ 
+ /**
+@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
+               return;
+ 
+       cl->tx_offs = 0;
+-      ipc_tx_callback(cl);
++      ipc_tx_send(cl);
+       ++cl->send_msg_cnt_ipc;
+ }
+ 
+diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
+index d003ad864ee44..a6e5d350a94ce 100644
+--- a/drivers/infiniband/hw/irdma/uk.c
++++ b/drivers/infiniband/hw/irdma/uk.c
+@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+                             FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+               i = 0;
+       } else {
+-              qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
++              qp->wqe_ops.iw_set_fragment(wqe, 0,
++                                          frag_cnt ? op_info->sg_list : NULL,
+                                           qp->swqe_polarity);
+               i = 1;
+       }
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 08371a80fdc26..be189e0525de6 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -523,6 +523,10 @@ repoll:
+                           "Requestor" : "Responder", cq->mcq.cqn);
+               mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
+                           err_cqe->syndrome, err_cqe->vendor_err_synd);
++              if (wc->status != IB_WC_WR_FLUSH_ERR &&
++                  (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
++                      dev->umrc.state = MLX5_UMR_STATE_RECOVER;
++
+               if (opcode == MLX5_CQE_REQ_ERR) {
+                       wq = &(*cur_qp)->sq;
+                       wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 63c89a72cc352..bb13164124fdb 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4336,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
+       dev->mdev = mdev;
+       dev->num_ports = num_ports;
+ 
+-      if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
++      if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
+               profile = &raw_eth_profile;
+       else
+               profile = &pf_profile;
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 998b67509a533..c2cca032a6ed4 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -717,13 +717,24 @@ struct mlx5_ib_umr_context {
+       struct completion       done;
+ };
+ 
++enum {
++      MLX5_UMR_STATE_UNINIT,
++      MLX5_UMR_STATE_ACTIVE,
++      MLX5_UMR_STATE_RECOVER,
++      MLX5_UMR_STATE_ERR,
++};
++
+ struct umr_common {
+       struct ib_pd    *pd;
+       struct ib_cq    *cq;
+       struct ib_qp    *qp;
+-      /* control access to UMR QP
++      /* Protects from UMR QP overflow
+        */
+       struct semaphore        sem;
++      /* Protects from using UMR while the UMR is not active
++       */
++      struct mutex lock;
++      unsigned int state;
+ };
+ 
+ struct mlx5_cache_ent {
+diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
+index 3a48364c09181..d5105b5c9979b 100644
+--- a/drivers/infiniband/hw/mlx5/umr.c
++++ b/drivers/infiniband/hw/mlx5/umr.c
+@@ -176,6 +176,8 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
+       dev->umrc.pd = pd;
+ 
+       sema_init(&dev->umrc.sem, MAX_UMR_WR);
++      mutex_init(&dev->umrc.lock);
++      dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
+ 
+       return 0;
+ 
+@@ -190,11 +192,38 @@ destroy_pd:
+ 
+ void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
+ {
++      if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
++              return;
+       ib_destroy_qp(dev->umrc.qp);
+       ib_free_cq(dev->umrc.cq);
+       ib_dealloc_pd(dev->umrc.pd);
+ }
+ 
++static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
++{
++      struct umr_common *umrc = &dev->umrc;
++      struct ib_qp_attr attr;
++      int err;
++
++      attr.qp_state = IB_QPS_RESET;
++      err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
++      if (err) {
++              mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
++              goto err;
++      }
++
++      err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
++      if (err)
++              goto err;
++
++      umrc->state = MLX5_UMR_STATE_ACTIVE;
++      return 0;
++
++err:
++      umrc->state = MLX5_UMR_STATE_ERR;
++      return err;
++}
++
+ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+                              struct mlx5r_umr_wqe *wqe, bool with_data)
+ {
+@@ -231,7 +260,7 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ 
+       id.ib_cqe = cqe;
+       mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
+-                       MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);
++                       MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
+ 
+       mlx5r_ring_db(qp, 1, ctrl);
+ 
+@@ -270,17 +299,49 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
+       mlx5r_umr_init_context(&umr_context);
+ 
+       down(&umrc->sem);
+-      err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+-                                with_data);
+-      if (err)
+-              mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
+-      else {
+-              wait_for_completion(&umr_context.done);
+-              if (umr_context.status != IB_WC_SUCCESS) {
+-                      mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+-                                   umr_context.status);
++      while (true) {
++              mutex_lock(&umrc->lock);
++              if (umrc->state == MLX5_UMR_STATE_ERR) {
++                      mutex_unlock(&umrc->lock);
+                       err = -EFAULT;
++                      break;
++              }
++
++              if (umrc->state == MLX5_UMR_STATE_RECOVER) {
++                      mutex_unlock(&umrc->lock);
++                      usleep_range(3000, 5000);
++                      continue;
++              }
++
++              err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
++                                        with_data);
++              mutex_unlock(&umrc->lock);
++              if (err) {
++                      mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
++                                   err);
++                      break;
+               }
++
++              wait_for_completion(&umr_context.done);
++
++              if (umr_context.status == IB_WC_SUCCESS)
++                      break;
++
++              if (umr_context.status == IB_WC_WR_FLUSH_ERR)
++                      continue;
++
++              WARN_ON_ONCE(1);
++              mlx5_ib_warn(dev,
++                      "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
++                      umr_context.status);
++              mutex_lock(&umrc->lock);
++              err = mlx5r_umr_recover(dev);
++              mutex_unlock(&umrc->lock);
++              if (err)
++                      mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
++                                   err);
++              err = -EFAULT;
++              break;
+       }
+       up(&umrc->sem);
+       return err;
+diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
+index b2a68bc9f0b4d..b86de1312512b 100644
+--- a/drivers/input/joystick/iforce/iforce-main.c
++++ b/drivers/input/joystick/iforce/iforce-main.c
+@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
+       { 0x046d, 0xc291, "Logitech WingMan Formula Force",             btn_wheel, abs_wheel, ff_iforce },
+       { 0x05ef, 0x020a, "AVB Top Shot Pegasus",                       btn_joystick_avb, abs_avb_pegasus, ff_iforce },
+       { 0x05ef, 0x8884, "AVB Mag Turbo Force",                        btn_wheel, abs_wheel, ff_iforce },
++      { 0x05ef, 0x8886, "Boeder Force Feedback Wheel",                btn_wheel, abs_wheel, ff_iforce },
+       { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel",   btn_wheel, abs_wheel, ff_iforce }, //?
+       { 0x061c, 0xc0a4, "ACT LABS Force RS",                          btn_wheel, abs_wheel, ff_iforce }, //?
+       { 0x061c, 0xc084, "ACT LABS Force RS",                          btn_wheel, abs_wheel, ff_iforce },
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index aa45a9fee6a01..3020ddc1ca48b 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -95,6 +95,7 @@ static const struct goodix_chip_data gt9x_chip_data = {
+ 
+ static const struct goodix_chip_id goodix_chip_ids[] = {
+       { .id = "1151", .data = &gt1x_chip_data },
++      { .id = "1158", .data = &gt1x_chip_data },
+       { .id = "5663", .data = &gt1x_chip_data },
+       { .id = "5688", .data = &gt1x_chip_data },
+       { .id = "917S", .data = &gt1x_chip_data },
+@@ -1514,6 +1515,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+ #ifdef CONFIG_OF
+ static const struct of_device_id goodix_of_match[] = {
+       { .compatible = "goodix,gt1151" },
++      { .compatible = "goodix,gt1158" },
+       { .compatible = "goodix,gt5663" },
+       { .compatible = "goodix,gt5688" },
+       { .compatible = "goodix,gt911" },
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 40ac3a78d90ef..c0464959cbcdb 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -168,38 +168,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
+       return re->hi & VTD_PAGE_MASK;
+ }
+ 
+-static inline void context_clear_pasid_enable(struct context_entry *context)
+-{
+-      context->lo &= ~(1ULL << 11);
+-}
+-
+-static inline bool context_pasid_enabled(struct context_entry *context)
+-{
+-      return !!(context->lo & (1ULL << 11));
+-}
+-
+-static inline void context_set_copied(struct context_entry *context)
+-{
+-      context->hi |= (1ull << 3);
+-}
+-
+-static inline bool context_copied(struct context_entry *context)
+-{
+-      return !!(context->hi & (1ULL << 3));
+-}
+-
+-static inline bool __context_present(struct context_entry *context)
+-{
+-      return (context->lo & 1);
+-}
+-
+-bool context_present(struct context_entry *context)
+-{
+-      return context_pasid_enabled(context) ?
+-           __context_present(context) :
+-           __context_present(context) && !context_copied(context);
+-}
+-
+ static inline void context_set_present(struct context_entry *context)
+ {
+       context->lo |= 1;
+@@ -247,6 +215,26 @@ static inline void context_clear_entry(struct context_entry *context)
+       context->hi = 0;
+ }
+ 
++static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
++{
++      if (!iommu->copied_tables)
++              return false;
++
++      return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
++}
++
++static inline void
++set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
++{
++      set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
++}
++
++static inline void
++clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
++{
++      clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
++}
++
+ /*
+  * This domain is a statically identity mapping domain.
+  *    1. This domain creats a static 1:1 mapping to all usable memory.
+@@ -644,6 +632,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+       struct context_entry *context;
+       u64 *entry;
+ 
++      /*
++       * Except that the caller requested to allocate a new entry,
++       * returning a copied context entry makes no sense.
++       */
++      if (!alloc && context_copied(iommu, bus, devfn))
++              return NULL;
++
+       entry = &root->lo;
+       if (sm_supported(iommu)) {
+               if (devfn >= 0x80) {
+@@ -1770,6 +1765,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
+               iommu->domain_ids = NULL;
+       }
+ 
++      if (iommu->copied_tables) {
++              bitmap_free(iommu->copied_tables);
++              iommu->copied_tables = NULL;
++      }
++
+       g_iommus[iommu->seq_id] = NULL;
+ 
+       /* free context mapping */
+@@ -1978,7 +1978,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+               goto out_unlock;
+ 
+       ret = 0;
+-      if (context_present(context))
++      if (context_present(context) && !context_copied(iommu, bus, devfn))
+               goto out_unlock;
+ 
+       /*
+@@ -1990,7 +1990,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+        * in-flight DMA will exist, and we don't need to worry anymore
+        * hereafter.
+        */
+-      if (context_copied(context)) {
++      if (context_copied(iommu, bus, devfn)) {
+               u16 did_old = context_domain_id(context);
+ 
+               if (did_old < cap_ndoms(iommu->cap)) {
+@@ -2001,6 +2001,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
+                       iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+                                                DMA_TLB_DSI_FLUSH);
+               }
++
++              clear_context_copied(iommu, bus, devfn);
+       }
+ 
+       context_clear_entry(context);
+@@ -2783,32 +2785,14 @@ static int copy_context_table(struct intel_iommu *iommu,
+               /* Now copy the context entry */
+               memcpy(&ce, old_ce + idx, sizeof(ce));
+ 
+-              if (!__context_present(&ce))
++              if (!context_present(&ce))
+                       continue;
+ 
+               did = context_domain_id(&ce);
+               if (did >= 0 && did < cap_ndoms(iommu->cap))
+                       set_bit(did, iommu->domain_ids);
+ 
+-              /*
+-               * We need a marker for copied context entries. This
+-               * marker needs to work for the old format as well as
+-               * for extended context entries.
+-               *
+-               * Bit 67 of the context entry is used. In the old
+-               * format this bit is available to software, in the
+-               * extended format it is the PGE bit, but PGE is ignored
+-               * by HW if PASIDs are disabled (and thus still
+-               * available).
+-               *
+-               * So disable PASIDs first and then mark the entry
+-               * copied. This means that we don't copy PASID
+-               * translations from the old kernel, but this is fine as
+-               * faults there are not fatal.
+-               */
+-              context_clear_pasid_enable(&ce);
+-              context_set_copied(&ce);
+-
++              set_context_copied(iommu, bus, devfn);
+               new_ce[idx] = ce;
+       }
+ 
+@@ -2835,8 +2819,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
+       bool new_ext, ext;
+ 
+       rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
+-      ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
+-      new_ext    = !!ecap_ecs(iommu->ecap);
++      ext        = !!(rtaddr_reg & DMA_RTADDR_SMT);
++      new_ext    = !!sm_supported(iommu);
+ 
+       /*
+        * The RTT bit can only be changed when translation is disabled,
+@@ -2847,6 +2831,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
+       if (new_ext != ext)
+               return -EINVAL;
+ 
++      iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
++      if (!iommu->copied_tables)
++              return -ENOMEM;
++
+       old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
+       if (!old_rt_phys)
+               return -EINVAL;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c28f8cc00d1cf..a9cc85882b315 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct tg3 *tp = netdev_priv(dev);
+ 
++      tg3_reset_task_cancel(tp);
++
+       rtnl_lock();
++
+       netif_device_detach(dev);
+ 
+       if (netif_running(dev))
+               dev_close(dev);
+ 
+-      if (system_state == SYSTEM_POWER_OFF)
+-              tg3_power_down(tp);
++      tg3_power_down(tp);
+ 
+       rtnl_unlock();
++
++      pci_disable_device(pdev);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+index cfb8bedba5124..079fa44ada71e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
+                                      sw_owner_id[i]);
+       }
+ 
++      if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
++          dev->priv.sw_vhca_id > 0)
++              MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
++
+       return mlx5_cmd_exec_in(dev, init_hca, in);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 616207c3b187a..6c8bb74bd8fc6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
+ MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+ 
+ static u32 sw_owner_id[4];
++#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
++static DEFINE_IDA(sw_vhca_ida);
+ 
+ enum {
+       MLX5_ATOMIC_REQ_MODE_BE = 0x0,
+@@ -499,6 +501,49 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
+       return err;
+ }
+ 
++bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
++{
++      struct devlink *devlink = priv_to_devlink(dev);
++      union devlink_param_value val;
++      int err;
++
++      err = devlink_param_driverinit_value_get(devlink,
++                                               DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
++                                               &val);
++
++      if (!err)
++              return val.vbool;
++
++      mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
++      return MLX5_CAP_GEN(dev, roce);
++}
++EXPORT_SYMBOL(mlx5_is_roce_on);
++
++static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
++{
++      void *set_hca_cap;
++      int err;
++
++      if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
++              return 0;
++
++      err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
++      if (err)
++              return err;
++
++      if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
++          !(dev->priv.sw_vhca_id > 0))
++              return 0;
++
++      set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
++                                 capability);
++      memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
++             MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
++      MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
++
++      return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
++}
++
+ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+ {
+       struct mlx5_profile *prof = &dev->profile;
+@@ -577,7 +622,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+                        MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+ 
+       if (MLX5_CAP_GEN(dev, roce_rw_supported))
+-              MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
++              MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
++                       mlx5_is_roce_on(dev));
+ 
+       max_uc_list = max_uc_list_get_devlink_param(dev);
+       if (max_uc_list > 0)
+@@ -603,7 +649,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+  */
+ static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
+ {
+-      return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
++      return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
+               (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
+ }
+ 
+@@ -669,6 +715,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
+               goto out;
+       }
+ 
++      memset(set_ctx, 0, set_sz);
++      err = handle_hca_cap_2(dev, set_ctx);
++      if (err) {
++              mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
++              goto out;
++      }
++
+ out:
+       kfree(set_ctx);
+       return err;
+@@ -1512,6 +1565,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+       if (err)
+               goto err_hca_caps;
+ 
++      /* The conjunction of sw_vhca_id with sw_owner_id will be a global
++       * unique id per function which uses mlx5_core.
++       * Those values are supplied to FW as part of the init HCA command to
++       * be used by both driver and FW when it's applicable.
++       */
++      dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
++                                             MAX_SW_VHCA_ID,
++                                             GFP_KERNEL);
++      if (dev->priv.sw_vhca_id < 0)
++              mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
++                            dev->priv.sw_vhca_id);
++
+       return 0;
+ 
+ err_hca_caps:
+@@ -1537,6 +1602,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+ {
+       struct mlx5_priv *priv = &dev->priv;
+ 
++      if (priv->sw_vhca_id > 0)
++              ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
++
+       mlx5_hca_caps_free(dev);
+       mlx5_adev_cleanup(dev);
+       mlx5_pagealloc_cleanup(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index ac020cb780727..d5c3173250309 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+               goto free;
+ 
+       MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+-      MLX5_SET(modify_nic_vport_context_in, in,
+-               nic_vport_context.affiliated_vhca_id,
+-               MLX5_CAP_GEN(master_mdev, vhca_id));
++      if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
++              MLX5_SET(modify_nic_vport_context_in, in,
++                       nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
++              MLX5_SET(modify_nic_vport_context_in, in,
++                       nic_vport_context.affiliated_vhca_id,
++                       MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
++      } else {
++              MLX5_SET(modify_nic_vport_context_in, in,
++                       nic_vport_context.affiliated_vhca_id,
++                       MLX5_CAP_GEN(master_mdev, vhca_id));
++      }
+       MLX5_SET(modify_nic_vport_context_in, in,
+                nic_vport_context.affiliation_criteria,
+                MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
+index 1e1f40f628a02..c69b87d3837da 100644
+--- a/drivers/net/ieee802154/cc2520.c
++++ b/drivers/net/ieee802154/cc2520.c
+@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
+               goto err_tx;
+ 
+       if (status & CC2520_STATUS_TX_UNDERFLOW) {
++              rc = -EINVAL;
+               dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
+               goto err_tx;
+       }
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 2de09ad5bac03..e11f70911acc1 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
+ },
+ #endif
+ 
++/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
++{
++      USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
++                      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++      .driver_info = 0,
++},
++
+ /* ThinkPad USB-C Dock (based on Realtek RTL8153) */
+ {
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index d142ac8fcf6e2..688905ea0a6d3 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -770,6 +770,7 @@ enum rtl8152_flags {
+       RX_EPROTO,
+ };
+ 
++#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK          0x3054
+ #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2     0x3082
+ #define DEVICE_ID_THINKPAD_USB_C_DONGLE                       0x720c
+ #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2            0xa387
+@@ -9581,6 +9582,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
+ 
+       if (vendor_id == VENDOR_ID_LENOVO) {
+               switch (product_id) {
++              case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
+               case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+               case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+               case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
+@@ -9828,6 +9830,7 @@ static const struct usb_device_id rtl8152_table[] = {
+       REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
+       REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
+       REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f),
++      REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3054),
+       REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3062),
+       REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3069),
+       REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3082),
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 73d9fcba3b1c0..9f6614f7dbeb1 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3517,6 +3517,8 @@ static const struct pci_device_id nvme_id_table[] = {
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0xc0a9, 0x540a),   /* Crucial P2 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
++      { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
++              .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+               .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+       { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index dc3b4dc8fe08b..a3694a32f6d52 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1506,6 +1506,9 @@ static void nvmet_tcp_state_change(struct sock *sk)
+               goto done;
+ 
+       switch (sk->sk_state) {
++      case TCP_FIN_WAIT2:
++      case TCP_LAST_ACK:
++              break;
+       case TCP_FIN_WAIT1:
+       case TCP_CLOSE_WAIT:
+       case TCP_CLOSE:
+diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
+index 68eb61c65d345..de4a7b3e5966e 100644
+--- a/drivers/peci/cpu.c
++++ b/drivers/peci/cpu.c
+@@ -188,8 +188,6 @@ static void adev_release(struct device *dev)
+ {
+       struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ 
+-      auxiliary_device_uninit(adev);
+-
+       kfree(adev->name);
+       kfree(adev);
+ }
+@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev)
+       struct auxiliary_device *adev = _adev;
+ 
+       auxiliary_device_delete(adev);
++      auxiliary_device_uninit(adev);
+ }
+ 
+ static int devm_adev_add(struct device *dev, int idx)
+diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
+index 513de1f54e2d7..933b96e243b84 100644
+--- a/drivers/perf/arm_pmu_platform.c
++++ b/drivers/perf/arm_pmu_platform.c
+@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
+ 
+       if (num_irqs == 1) {
+               int irq = platform_get_irq(pdev, 0);
+-              if (irq && irq_is_percpu_devid(irq))
++              if ((irq > 0) && irq_is_percpu_devid(irq))
+                       return pmu_parse_percpu_irq(pmu, irq);
+       }
+ 
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index ce2bd88feeaa8..08019c6ccc9ca 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -556,6 +556,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+       /* Surface Laptop Go 1 */
+       { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+ 
++      /* Surface Laptop Go 2 */
++      { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
++
+       /* Surface Laptop Studio */
+       { "MSHW0123", (unsigned long)ssam_node_group_sls },
+ 
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 9c6943e401a6c..0fbcaffabbfc7 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
+       {KE_KEY, 0x22, {KEY_PROG2} },    /* Arcade */
+       {KE_KEY, 0x23, {KEY_PROG3} },    /* P_Key */
+       {KE_KEY, 0x24, {KEY_PROG4} },    /* Social networking_Key */
++      {KE_KEY, 0x27, {KEY_HELP} },
+       {KE_KEY, 0x29, {KEY_PROG3} },    /* P_Key for TM8372 */
+       {KE_IGNORE, 0x41, {KEY_MUTE} },
+       {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
+       {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
+       {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
+       {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
+-      {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
++      /*
++       * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
++       * with the "Video Bus" input device events. But sometimes it is not
++       * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
++       * udev/hwdb can override it on systems where it is not a dup.
++       */
++      {KE_KEY, 0x61, {KEY_UNKNOWN} },
+       {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
+       {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
+       {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 62ce198a34631..a0f31624aee97 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -107,7 +107,7 @@ module_param(fnlock_default, bool, 0444);
+ #define WMI_EVENT_MASK                        0xFFFF
+ 
+ #define FAN_CURVE_POINTS              8
+-#define FAN_CURVE_BUF_LEN             (FAN_CURVE_POINTS * 2)
++#define FAN_CURVE_BUF_LEN             32
+ #define FAN_CURVE_DEV_CPU             0x00
+ #define FAN_CURVE_DEV_GPU             0x01
+ /* Mask to determine if setting temperature or percentage */
+@@ -2208,8 +2208,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
+       curves = &asus->custom_fan_curves[fan_idx];
+       err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
+                                          FAN_CURVE_BUF_LEN);
+-      if (err)
++      if (err) {
++              pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
+               return err;
++      }
+ 
+       fan_curve_copy_from_buf(curves, buf);
+       curves->device_id = fan_dev;
+@@ -2227,9 +2229,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
+ 
+       err = fan_curve_get_factory_default(asus, fan_dev);
+       if (err) {
+-              pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
+-                       fan_dev, err);
+-              /* Don't cause probe to fail on devices without fan-curves */
+               return 0;
+       }
+ 
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 4051c8cd0cd8a..23ab3b048d9be 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -62,6 +62,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
+ 
++/* Reported-by: Tom Hu <[email protected]> */
++UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
++              "ASUS",
++              "External HDD",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_IGNORE_UAS),
++
+ /* Reported-by: David Webb <[email protected]> */
+ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+               "Seagate",
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index 5fcf89faa31ab..d72626d71258f 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -196,7 +196,6 @@
+ #define ecap_dis(e)           (((e) >> 27) & 0x1)
+ #define ecap_nest(e)          (((e) >> 26) & 0x1)
+ #define ecap_mts(e)           (((e) >> 25) & 0x1)
+-#define ecap_ecs(e)           (((e) >> 24) & 0x1)
+ #define ecap_iotlb_offset(e)  ((((e) >> 8) & 0x3ff) * 16)
+ #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
+ #define ecap_coherent(e)      ((e) & 0x1)
+@@ -264,7 +263,6 @@
+ #define DMA_GSTS_CFIS (((u32)1) << 23)
+ 
+ /* DMA_RTADDR_REG */
+-#define DMA_RTADDR_RTT (((u64)1) << 11)
+ #define DMA_RTADDR_SMT (((u64)1) << 10)
+ 
+ /* CCMD_REG */
+@@ -579,6 +577,7 @@ struct intel_iommu {
+ 
+ #ifdef CONFIG_INTEL_IOMMU
+       unsigned long   *domain_ids; /* bitmap of domains */
++      unsigned long   *copied_tables; /* bitmap of copied tables */
+       spinlock_t      lock; /* protect context, domain ids */
+       struct root_entry *root_entry; /* virtual address */
+ 
+@@ -692,6 +691,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
+               (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+ }
+ 
++static inline bool context_present(struct context_entry *context)
++{
++      return (context->lo & 1);
++}
++
+ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+ 
+ extern int dmar_enable_qi(struct intel_iommu *iommu);
+@@ -776,7 +780,6 @@ static inline void intel_iommu_debugfs_init(void) {}
+ #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+ 
+ extern const struct attribute_group *intel_iommu_groups[];
+-bool context_present(struct context_entry *context);
+ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+                                        u8 devfn, int alloc);
+ 
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index b0b4ac92354a2..b3ea245faa515 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -606,6 +606,7 @@ struct mlx5_priv {
+       spinlock_t              ctx_lock;
+       struct mlx5_adev       **adev;
+       int                     adev_idx;
++      int                     sw_vhca_id;
+       struct mlx5_events      *events;
+ 
+       struct mlx5_flow_steering *steering;
+@@ -1274,16 +1275,17 @@ enum {
+       MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+ };
+ 
+-static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
++bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
++
++static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
+ {
+-      struct devlink *devlink = priv_to_devlink(dev);
+-      union devlink_param_value val;
+-      int err;
+-
+-      err = devlink_param_driverinit_value_get(devlink,
+-                                               DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+-                                               &val);
+-      return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
++      if (MLX5_CAP_GEN(dev, roce_rw_supported))
++              return MLX5_CAP_GEN(dev, roce);
++
++      /* If RoCE cap is read-only in FW, get RoCE state from devlink
++       * in order to support RoCE enable/disable feature
++       */
++      return mlx5_is_roce_on(dev);
+ }
+ 
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index fd7d083a34d33..6d57e5ec9718d 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1804,7 +1804,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+       u8         max_reformat_remove_size[0x8];
+       u8         max_reformat_remove_offset[0x8];
+ 
+-      u8         reserved_at_c0[0x740];
++      u8         reserved_at_c0[0x160];
++
++      u8         reserved_at_220[0x1];
++      u8         sw_vhca_id_valid[0x1];
++      u8         sw_vhca_id[0xe];
++      u8         reserved_at_230[0x10];
++
++      u8         reserved_at_240[0x5c0];
+ };
+ 
+ enum mlx5_ifc_flow_destination_type {
+@@ -3715,6 +3722,11 @@ struct mlx5_ifc_rmpc_bits {
+       struct mlx5_ifc_wq_bits wq;
+ };
+ 
++enum {
++      VHCA_ID_TYPE_HW = 0,
++      VHCA_ID_TYPE_SW = 1,
++};
++
+ struct mlx5_ifc_nic_vport_context_bits {
+       u8         reserved_at_0[0x5];
+       u8         min_wqe_inline_mode[0x3];
+@@ -3731,8 +3743,8 @@ struct mlx5_ifc_nic_vport_context_bits {
+       u8         event_on_mc_address_change[0x1];
+       u8         event_on_uc_address_change[0x1];
+ 
+-      u8         reserved_at_40[0xc];
+-
++      u8         vhca_id_type[0x1];
++      u8         reserved_at_41[0xb];
+       u8         affiliation_criteria[0x4];
+       u8         affiliated_vhca_id[0x10];
+ 
+@@ -7189,7 +7201,12 @@ struct mlx5_ifc_init_hca_in_bits {
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+ 
+-      u8         reserved_at_40[0x40];
++      u8         reserved_at_40[0x20];
++
++      u8         reserved_at_60[0x2];
++      u8         sw_vhca_id[0xe];
++      u8         reserved_at_70[0x10];
++
+       u8         sw_owner_id[4][0x20];
+ };
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index cbdf0e2bc5ae0..d0fb74b0db1d5 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -4420,6 +4420,22 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
+                              MGMT_STATUS_NOT_SUPPORTED);
+ }
+ 
++static u32 get_params_flags(struct hci_dev *hdev,
++                          struct hci_conn_params *params)
++{
++      u32 flags = hdev->conn_flags;
++
++      /* Devices using RPAs can only be programmed in the acceptlist if
++       * LL Privacy has been enable otherwise they cannot mark
++       * HCI_CONN_FLAG_REMOTE_WAKEUP.
++       */
++      if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
++          hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
++              flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
++
++      return flags;
++}
++
+ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+                           u16 data_len)
+ {
+@@ -4451,10 +4467,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+       } else {
+               params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+                                               le_addr_type(cp->addr.type));
+-
+               if (!params)
+                       goto done;
+ 
++              supported_flags = get_params_flags(hdev, params);
+               current_flags = params->flags;
+       }
+ 
+@@ -4523,38 +4539,35 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+                       bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
+                                   &cp->addr.bdaddr, cp->addr.type);
+               }
+-      } else {
+-              params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+-                                              le_addr_type(cp->addr.type));
+-              if (params) {
+-                      /* Devices using RPAs can only be programmed in the
+-                       * acceptlist LL Privacy has been enable otherwise they
+-                       * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
+-                       */
+-                      if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
+-                          !use_ll_privacy(hdev) &&
+-                          hci_find_irk_by_addr(hdev, &params->addr,
+-                                               params->addr_type)) {
+-                              bt_dev_warn(hdev,
+-                                          "Cannot set wakeable for RPA");
+-                              goto unlock;
+-                      }
+ 
+-                      params->flags = current_flags;
+-                      status = MGMT_STATUS_SUCCESS;
++              goto unlock;
++      }
+ 
+-                      /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
+-                       * has been set.
+-                       */
+-                      if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
+-                              hci_update_passive_scan(hdev);
+-              } else {
+-                      bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
+-                                  &cp->addr.bdaddr,
+-                                  le_addr_type(cp->addr.type));
+-              }
++      params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
++                                      le_addr_type(cp->addr.type));
++      if (!params) {
++              bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
++                          &cp->addr.bdaddr, le_addr_type(cp->addr.type));
++              goto unlock;
++      }
++
++      supported_flags = get_params_flags(hdev, params);
++
++      if ((supported_flags | current_flags) != supported_flags) {
++              bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
++                          current_flags, supported_flags);
++              goto unlock;
+       }
+ 
++      params->flags = current_flags;
++      status = MGMT_STATUS_SUCCESS;
++
++      /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
++       * has been set.
++       */
++      if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
++              hci_update_passive_scan(hdev);
++
+ unlock:
+       hci_dev_unlock(hdev);
+ 
+diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
+index eb204ad36eeec..846588c0070a5 100644
+--- a/net/dsa/tag_hellcreek.c
++++ b/net/dsa/tag_hellcreek.c
+@@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
+ 
+       skb->dev = dsa_master_find_slave(dev, 0, port);
+       if (!skb->dev) {
+-              netdev_warn(dev, "Failed to get source port: %d\n", port);
++              netdev_warn_once(dev, "Failed to get source port: %d\n", port);
+               return NULL;
+       }
+ 
