commit:     e9a4a8d320e321ce780a3598be19da864ea4a595
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 22 11:38:14 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 22 11:38:14 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e9a4a8d3

Linux patch 5.10.68

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1067_linux-5.10.68.patch | 3868 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3872 insertions(+)

diff --git a/0000_README b/0000_README
index 20bba3a..416061d 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch:  1066_linux-5.10.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.67
 
+Patch:  1067_linux-5.10.68.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.68
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-5.10.68.patch b/1067_linux-5.10.68.patch
new file mode 100644
index 0000000..7a0e47b
--- /dev/null
+++ b/1067_linux-5.10.68.patch
@@ -0,0 +1,3868 @@
+diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
+index 767e86354c8e9..2c6911c775c8e 100644
+--- a/Documentation/devicetree/bindings/arm/tegra.yaml
++++ b/Documentation/devicetree/bindings/arm/tegra.yaml
+@@ -54,7 +54,7 @@ properties:
+           - const: toradex,apalis_t30
+           - const: nvidia,tegra30
+       - items:
+-          - const: toradex,apalis_t30-eval-v1.1
++          - const: toradex,apalis_t30-v1.1-eval
+           - const: toradex,apalis_t30-eval
+           - const: toradex,apalis_t30-v1.1
+           - const: toradex,apalis_t30
+diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+index 44919d48d2415..c459f169a9044 100644
+--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
++++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+@@ -122,7 +122,7 @@ on various other factors also like;
+       so the device should have enough free bytes available its OOB/Spare
+       area to accommodate ECC for entire page. In general following expression
+       helps in determining if given device can accommodate ECC syndrome:
+-      "2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
++      "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
+       where
+               OOBSIZE         number of bytes in OOB/spare area
+               PAGESIZE        number of bytes in main-area of device page
+diff --git a/Makefile b/Makefile
+index a47273ecfdf21..e50581c9db50e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a2fbea3ee07c7..102418ac5ff4a 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+       clear_page(to);
+       clear_bit(PG_dc_clean, &page->flags);
+ }
+-
++EXPORT_SYMBOL(clear_user_page);
+ 
+ /**********************************************************************
+  * Explicit Cache flush request from user space via syscall
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 062b21f30f942..a9bbfb800ec2b 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -510,7 +510,7 @@ size_t sve_state_size(struct task_struct const *task)
+ void sve_alloc(struct task_struct *task)
+ {
+       if (task->thread.sve_state) {
+-              memset(task->thread.sve_state, 0, sve_state_size(current));
++              memset(task->thread.sve_state, 0, sve_state_size(task));
+               return;
+       }
+ 
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 5e5dd99e8cee8..5bc978be80434 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1143,6 +1143,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       break;
+ 
++              /*
++               * We could owe a reset due to PSCI. Handle the pending reset
++               * here to ensure userspace register accesses are ordered after
++               * the reset.
++               */
++              if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
++                      kvm_reset_vcpu(vcpu);
++
+               if (ioctl == KVM_SET_ONE_REG)
+                       r = kvm_arm_set_reg(vcpu, &reg);
+               else
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index b969c2157ad2e..204c62debf06e 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -263,10 +263,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+  */
+ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
++      struct vcpu_reset_state reset_state;
+       int ret;
+       bool loaded;
+       u32 pstate;
+ 
++      mutex_lock(&vcpu->kvm->lock);
++      reset_state = vcpu->arch.reset_state;
++      WRITE_ONCE(vcpu->arch.reset_state.reset, false);
++      mutex_unlock(&vcpu->kvm->lock);
++
+       /* Reset PMU outside of the non-preemptible section */
+       kvm_pmu_vcpu_reset(vcpu);
+ 
+@@ -325,8 +331,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+        * Additional reset state handling that PSCI may have imposed on us.
+        * Must be done after all the sys_reg reset.
+        */
+-      if (vcpu->arch.reset_state.reset) {
+-              unsigned long target_pc = vcpu->arch.reset_state.pc;
++      if (reset_state.reset) {
++              unsigned long target_pc = reset_state.pc;
+ 
+               /* Gracefully handle Thumb2 entry point */
+               if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+@@ -335,13 +341,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+               }
+ 
+               /* Propagate caller endianness */
+-              if (vcpu->arch.reset_state.be)
++              if (reset_state.be)
+                       kvm_vcpu_set_be(vcpu);
+ 
+               *vcpu_pc(vcpu) = target_pc;
+-              vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
+-
+-              vcpu->arch.reset_state.reset = false;
++              vcpu_set_reg(vcpu, 0, reset_state.r0);
+       }
+ 
+       /* Reset timer */
+@@ -366,6 +370,14 @@ int kvm_set_ipa_limit(void)
+       mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+       parange = cpuid_feature_extract_unsigned_field(mmfr0,
+                               ID_AA64MMFR0_PARANGE_SHIFT);
++      /*
++       * IPA size beyond 48 bits could not be supported
++       * on either 4K or 16K page size. Hence let's cap
++       * it to 48 bits, in case it's reported as larger
++       * on the system.
++       */
++      if (PAGE_SIZE != SZ_64K)
++              parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+ 
+       /*
+        * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index cd9995ee84419..5777b72bb8b62 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -3146,7 +3146,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+       /* The following code handles the fake_suspend = 1 case */
+       mflr    r0
+       std     r0, PPC_LR_STKOFF(r1)
+-      stdu    r1, -PPC_MIN_STKFRM(r1)
++      stdu    r1, -TM_FRAME_SIZE(r1)
+ 
+       /* Turn on TM. */
+       mfmsr   r8
+@@ -3161,10 +3161,42 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+       nop
+ 
++      /*
++       * It's possible that treclaim. may modify registers, if we have lost
++       * track of fake-suspend state in the guest due to it using rfscv.
++       * Save and restore registers in case this occurs.
++       */
++      mfspr   r3, SPRN_DSCR
++      mfspr   r4, SPRN_XER
++      mfspr   r5, SPRN_AMR
++      /* SPRN_TAR would need to be saved here if the kernel ever used it */
++      mfcr    r12
++      SAVE_NVGPRS(r1)
++      SAVE_GPR(2, r1)
++      SAVE_GPR(3, r1)
++      SAVE_GPR(4, r1)
++      SAVE_GPR(5, r1)
++      stw     r12, 8(r1)
++      std     r1, HSTATE_HOST_R1(r13)
++
+       /* We have to treclaim here because that's the only way to do S->N */
+       li      r3, TM_CAUSE_KVM_RESCHED
+       TRECLAIM(R3)
+ 
++      GET_PACA(r13)
++      ld      r1, HSTATE_HOST_R1(r13)
++      REST_GPR(2, r1)
++      REST_GPR(3, r1)
++      REST_GPR(4, r1)
++      REST_GPR(5, r1)
++      lwz     r12, 8(r1)
++      REST_NVGPRS(r1)
++      mtspr   SPRN_DSCR, r3
++      mtspr   SPRN_XER, r4
++      mtspr   SPRN_AMR, r5
++      mtcr    r12
++      HMT_MEDIUM
++
+       /*
+        * We were in fake suspend, so we are not going to save the
+        * register state as the guest checkpointed state (since
+@@ -3192,7 +3224,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+       std     r5, VCPU_TFHAR(r9)
+       std     r6, VCPU_TFIAR(r9)
+ 
+-      addi    r1, r1, PPC_MIN_STKFRM
++      addi    r1, r1, TM_FRAME_SIZE
+       ld      r0, PPC_LR_STKOFF(r1)
+       mtlr    r0
+       blr
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index dee01d3b23a40..8d9047d2d1e11 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ 
+ #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)           \
+ ({                                                            \
+-      /* Branch instruction needs 6 bytes */                  \
+-      int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
++      int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;      \
+       _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
+       REG_SET_SEEN(b1);                                       \
+       REG_SET_SEEN(b2);                                       \
+@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT4(0xb9080000, dst_reg, src_reg);
+               break;
+       case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
+-              if (!imm)
+-                      break;
+-              /* alfi %dst,imm */
+-              EMIT6_IMM(0xc20b0000, dst_reg, imm);
++              if (imm != 0) {
++                      /* alfi %dst,imm */
++                      EMIT6_IMM(0xc20b0000, dst_reg, imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
+@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT4(0xb9090000, dst_reg, src_reg);
+               break;
+       case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
+-              if (!imm)
+-                      break;
+-              /* alfi %dst,-imm */
+-              EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++              if (imm != 0) {
++                      /* alfi %dst,-imm */
++                      EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
+               if (!imm)
+                       break;
+-              /* agfi %dst,-imm */
+-              EMIT6_IMM(0xc2080000, dst_reg, -imm);
++              if (imm == -0x80000000) {
++                      /* algfi %dst,0x80000000 */
++                      EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
++              } else {
++                      /* agfi %dst,-imm */
++                      EMIT6_IMM(0xc2080000, dst_reg, -imm);
++              }
+               break;
+       /*
+        * BPF_MUL
+@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT4(0xb90c0000, dst_reg, src_reg);
+               break;
+       case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
+-              if (imm == 1)
+-                      break;
+-              /* msfi %r5,imm */
+-              EMIT6_IMM(0xc2010000, dst_reg, imm);
++              if (imm != 1) {
++                      /* msfi %r5,imm */
++                      EMIT6_IMM(0xc2010000, dst_reg, imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
+@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               /* lhgi %dst,0 */
+                               EMIT4_IMM(0xa7090000, dst_reg, 0);
++                      else
++                              EMIT_ZERO(dst_reg);
+                       break;
+               }
+               /* lhi %w0,0 */
+@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT4(0xb9820000, dst_reg, src_reg);
+               break;
+       case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
+-              if (!imm)
+-                      break;
+-              /* xilf %dst,imm */
+-              EMIT6_IMM(0xc0070000, dst_reg, imm);
++              if (imm != 0) {
++                      /* xilf %dst,imm */
++                      EMIT6_IMM(0xc0070000, dst_reg, imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
+@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
+               break;
+       case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
+-              if (imm == 0)
+-                      break;
+-              /* sll %dst,imm(%r0) */
+-              EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++              if (imm != 0) {
++                      /* sll %dst,imm(%r0) */
++                      EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
+@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
+               break;
+       case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
+-              if (imm == 0)
+-                      break;
+-              /* srl %dst,imm(%r0) */
+-              EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++              if (imm != 0) {
++                      /* srl %dst,imm(%r0) */
++                      EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
+@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+               EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
+               break;
+       case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
+-              if (imm == 0)
+-                      break;
+-              /* sra %dst,imm(%r0) */
+-              EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++              if (imm != 0) {
++                      /* sra %dst,imm(%r0) */
++                      EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++              }
+               EMIT_ZERO(dst_reg);
+               break;
+       case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index c9fa7be3df82d..5c95d242f38d7 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -301,8 +301,8 @@ do {                                                                \
+       unsigned int __gu_low, __gu_high;                               \
+       const unsigned int __user *__gu_ptr;                            \
+       __gu_ptr = (const void __user *)(ptr);                          \
+-      __get_user_asm(__gu_low, ptr, "l", "=r", label);                \
+-      __get_user_asm(__gu_high, ptr+1, "l", "=r", label);             \
++      __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);           \
++      __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);        \
+       (x) = ((unsigned long long)__gu_high << 32) | __gu_low;         \
+ } while (0)
+ #else
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 056d0367864e9..14b34963eb1f7 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1241,6 +1241,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
+ 
+ static void kill_me_now(struct callback_head *ch)
+ {
++      struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
++
++      p->mce_count = 0;
+       force_sig(SIGBUS);
+ }
+ 
+@@ -1249,6 +1252,7 @@ static void kill_me_maybe(struct callback_head *cb)
+      struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
+       int flags = MF_ACTION_REQUIRED;
+ 
++      p->mce_count = 0;
+       pr_err("Uncorrected hardware memory error in user-access at %llx", 
p->mce_addr);
+ 
+       if (!p->mce_ripv)
+@@ -1269,17 +1273,34 @@ static void kill_me_maybe(struct callback_head *cb)
+       }
+ }
+ 
+-static void queue_task_work(struct mce *m, int kill_it)
++static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
+ {
+-      current->mce_addr = m->addr;
+-      current->mce_kflags = m->kflags;
+-      current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+-      current->mce_whole_page = whole_page(m);
++      int count = ++current->mce_count;
+ 
+-      if (kill_it)
+-              current->mce_kill_me.func = kill_me_now;
+-      else
+-              current->mce_kill_me.func = kill_me_maybe;
++      /* First call, save all the details */
++      if (count == 1) {
++              current->mce_addr = m->addr;
++              current->mce_kflags = m->kflags;
++              current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
++              current->mce_whole_page = whole_page(m);
++
++              if (kill_current_task)
++                      current->mce_kill_me.func = kill_me_now;
++              else
++                      current->mce_kill_me.func = kill_me_maybe;
++      }
++
++      /* Ten is likely overkill. Don't expect more than two faults before task_work() */
++      if (count > 10)
++              mce_panic("Too many consecutive machine checks while accessing 
user data", m, msg);
++
++      /* Second or later call, make sure page address matches the one from first call */
++      if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
++              mce_panic("Consecutive machine checks to different user pages", 
m, msg);
++
++      /* Do not call task_work_add() more than once */
++      if (count > 1)
++              return;
+ 
+       task_work_add(current, &current->mce_kill_me, TWA_RESUME);
+ }
+@@ -1427,7 +1448,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+               /* If this triggers there is no way to recover. Die hard. */
+               BUG_ON(!on_thread_stack() || !user_mode(regs));
+ 
+-              queue_task_work(&m, kill_it);
++              queue_task_work(&m, msg, kill_it);
+ 
+       } else {
+               /*
+@@ -1445,7 +1466,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
+               }
+ 
+               if (m.kflags & MCE_IN_KERNEL_COPYIN)
+-                      queue_task_work(&m, kill_it);
++                      queue_task_work(&m, msg, kill_it);
+       }
+ out:
+       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index b5a3fa4033d38..067ca92e69ef9 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1389,18 +1389,18 @@ int kern_addr_valid(unsigned long addr)
+               return 0;
+ 
+       p4d = p4d_offset(pgd, addr);
+-      if (p4d_none(*p4d))
++      if (!p4d_present(*p4d))
+               return 0;
+ 
+       pud = pud_offset(p4d, addr);
+-      if (pud_none(*pud))
++      if (!pud_present(*pud))
+               return 0;
+ 
+       if (pud_large(*pud))
+               return pfn_valid(pud_pfn(*pud));
+ 
+       pmd = pmd_offset(pud, addr);
+-      if (pmd_none(*pmd))
++      if (!pmd_present(*pmd))
+               return 0;
+ 
+       if (pmd_large(*pmd))
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index ca311aaa67b88..232932bda4e5e 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
+       int err = 0;
+ 
+       start = sanitize_phys(start);
+-      end = sanitize_phys(end);
++
++      /*
++       * The end address passed into this function is exclusive, but
++       * sanitize_phys() expects an inclusive address.
++       */
++      end = sanitize_phys(end - 1) + 1;
+       if (start >= end) {
+               WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+                               start, end - 1, cattr_name(req_type));
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index d3cdf467d91fa..c758fd913cedd 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1204,6 +1204,11 @@ static void __init xen_dom0_set_legacy_features(void)
+       x86_platform.legacy.rtc = 1;
+ }
+ 
++static void __init xen_domu_set_legacy_features(void)
++{
++      x86_platform.legacy.rtc = 0;
++}
++
+ /* First C function to be called on Xen boot */
+ asmlinkage __visible void __init xen_start_kernel(void)
+ {
+@@ -1356,6 +1361,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
+               add_preferred_console("xenboot", 0, NULL);
+               if (pci_xen)
+                       x86_init.pci.arch_init = pci_xen_init;
++              x86_platform.set_legacy_features =
++                              xen_domu_set_legacy_features;
+       } else {
+               const struct dom0_vga_console_info *info =
+                       (void *)((char *)xen_start_info +
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index b8c2ddc01aec3..65c200e0ecb59 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2526,6 +2526,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+        * are likely to increase the throughput.
+        */
+       bfqq->new_bfqq = new_bfqq;
++      /*
++       * The above assignment schedules the following redirections:
++       * each time some I/O for bfqq arrives, the process that
++       * generated that I/O is disassociated from bfqq and
++       * associated with new_bfqq. Here we increases new_bfqq->ref
++       * in advance, adding the number of processes that are
++       * expected to be associated with new_bfqq as they happen to
++       * issue I/O.
++       */
+       new_bfqq->ref += process_refs;
+       return new_bfqq;
+ }
+@@ -2585,6 +2594,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+       struct bfq_queue *in_service_bfqq, *new_bfqq;
+ 
++      /* if a merge has already been setup, then proceed with that first */
++      if (bfqq->new_bfqq)
++              return bfqq->new_bfqq;
++
+       /*
+        * Do not perform queue merging if the device is non
+        * rotational and performs internal queueing. In fact, such a
+@@ -2639,9 +2652,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+       if (bfq_too_late_for_merging(bfqq))
+               return NULL;
+ 
+-      if (bfqq->new_bfqq)
+-              return bfqq->new_bfqq;
+-
+       if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+               return NULL;
+ 
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index a97f33d0c59f9..94665037f4a35 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -13,6 +13,7 @@
+ #include <linux/export.h>
+ #include <linux/rtc.h>
+ #include <linux/suspend.h>
++#include <linux/init.h>
+ 
+ #include <linux/mc146818rtc.h>
+ 
+@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+       const char *file = *(const char **)(tracedata + 2);
+       unsigned int user_hash_value, file_hash_value;
+ 
++      if (!x86_platform.legacy.rtc)
++              return;
++
+       user_hash_value = user % USERHASH;
+       file_hash_value = hash_string(lineno, file, FILEHASH);
+       set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
+ 
+ static int __init early_resume_init(void)
+ {
++      if (!x86_platform.legacy.rtc)
++              return 0;
++
+       hash_value_early_read = read_magic_time();
+       register_pm_notifier(&pm_trace_nb);
+       return 0;
+@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
+       unsigned int val = hash_value_early_read;
+       unsigned int user, file, dev;
+ 
++      if (!x86_platform.legacy.rtc)
++              return 0;
++
+       user = val % USERHASH;
+       val = val / USERHASH;
+       file = val % FILEHASH;
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 3c2fa44d9279b..d60d5520707dc 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -374,7 +374,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+           of_device_is_compatible(np, "fsl,ls1088a-gpio"))
+               gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+ 
+-      ret = gpiochip_add_data(gc, mpc8xxx_gc);
++      ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
+       if (ret) {
+               pr_err("%pOF: GPIO chip registration failed with status %d\n",
+                      np, ret);
+@@ -406,6 +406,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ 
+       return 0;
+ err:
++      if (mpc8xxx_gc->irq)
++              irq_domain_remove(mpc8xxx_gc->irq);
+       iounmap(mpc8xxx_gc->regs);
+       return ret;
+ }
+@@ -419,7 +421,6 @@ static int mpc8xxx_remove(struct platform_device *pdev)
+               irq_domain_remove(mpc8xxx_gc->irq);
+       }
+ 
+-      gpiochip_remove(&mpc8xxx_gc->gc);
+       iounmap(mpc8xxx_gc->regs);
+ 
+       return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 76c31aa7b84df..d949d6c52f24b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -717,7 +717,7 @@ enum amd_hw_ip_block_type {
+       MAX_HWIP
+ };
+ 
+-#define HWIP_MAX_INSTANCE     8
++#define HWIP_MAX_INSTANCE     10
+ 
+ struct amd_powerplay {
+       void *pp_handle;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index c1926154eda84..29b1ce2140abc 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -867,8 +867,14 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
+                                                    const struct drm_display_mode *mode)
+ {
+       struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
++      struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ 
+-      return lt9611_mode ? MODE_OK : MODE_BAD;
++      if (!lt9611_mode)
++              return MODE_BAD;
++      else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
++              return MODE_PANEL;
++      else
++              return MODE_OK;
+ }
+ 
+ static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+index 76d38561c9103..cf741c5c82d25 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+               if (switch_mmu_context) {
+                      struct etnaviv_iommu_context *old_context = gpu->mmu_context;
+ 
+-                      etnaviv_iommu_context_get(mmu_context);
+-                      gpu->mmu_context = mmu_context;
++                      gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
+                       etnaviv_iommu_context_put(old_context);
+               }
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 2b7e85318a76a..424474041c943 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -305,8 +305,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+               list_del(&mapping->obj_node);
+       }
+ 
+-      etnaviv_iommu_context_get(mmu_context);
+-      mapping->context = mmu_context;
++      mapping->context = etnaviv_iommu_context_get(mmu_context);
+       mapping->use = 1;
+ 
+       ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index d05c359945799..5f24cc52c2878 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+               goto err_submit_objects;
+ 
+       submit->ctx = file->driver_priv;
+-      etnaviv_iommu_context_get(submit->ctx->mmu);
+-      submit->mmu_context = submit->ctx->mmu;
++      submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
+       submit->exec_state = args->exec_state;
+       submit->flags = args->flags;
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index c6404b8d067f1..2520b7dad6ce7 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -561,6 +561,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
+       /* We rely on the GPU running, so program the clock */
+       etnaviv_gpu_update_clock(gpu);
+ 
++      gpu->fe_running = false;
++      gpu->exec_state = -1;
++      if (gpu->mmu_context)
++              etnaviv_iommu_context_put(gpu->mmu_context);
++      gpu->mmu_context = NULL;
++
+       return 0;
+ }
+ 
+@@ -623,19 +629,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+                         VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
+                         VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
+       }
++
++      gpu->fe_running = true;
+ }
+ 
+-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
++static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
++                                        struct etnaviv_iommu_context *context)
+ {
+-      u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+-                              &gpu->mmu_context->cmdbuf_mapping);
+       u16 prefetch;
++      u32 address;
+ 
+       /* setup the MMU */
+-      etnaviv_iommu_restore(gpu, gpu->mmu_context);
++      etnaviv_iommu_restore(gpu, context);
+ 
+       /* Start command processor */
+       prefetch = etnaviv_buffer_init(gpu);
++      address = etnaviv_cmdbuf_get_va(&gpu->buffer,
++                                      &gpu->mmu_context->cmdbuf_mapping);
+ 
+       etnaviv_gpu_start_fe(gpu, address, prefetch);
+ }
+@@ -814,7 +824,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+       /* Now program the hardware */
+       mutex_lock(&gpu->lock);
+       etnaviv_gpu_hw_init(gpu);
+-      gpu->exec_state = -1;
+       mutex_unlock(&gpu->lock);
+ 
+       pm_runtime_mark_last_busy(gpu->dev);
+@@ -1039,8 +1048,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
+       spin_unlock(&gpu->event_spinlock);
+ 
+       etnaviv_gpu_hw_init(gpu);
+-      gpu->exec_state = -1;
+-      gpu->mmu_context = NULL;
+ 
+       mutex_unlock(&gpu->lock);
+       pm_runtime_mark_last_busy(gpu->dev);
+@@ -1352,14 +1359,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
+               goto out_unlock;
+       }
+ 
+-      if (!gpu->mmu_context) {
+-              etnaviv_iommu_context_get(submit->mmu_context);
+-              gpu->mmu_context = submit->mmu_context;
+-              etnaviv_gpu_start_fe_idleloop(gpu);
+-      } else {
+-              etnaviv_iommu_context_get(gpu->mmu_context);
+-              submit->prev_mmu_context = gpu->mmu_context;
+-      }
++      if (!gpu->fe_running)
++              etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
++
++      if (submit->prev_mmu_context)
++              etnaviv_iommu_context_put(submit->prev_mmu_context);
++      submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
+ 
+       if (submit->nr_pmrs) {
+              gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+@@ -1561,7 +1566,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+ 
+ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+ {
+-      if (gpu->initialized && gpu->mmu_context) {
++      if (gpu->initialized && gpu->fe_running) {
+               /* Replace the last WAIT with END */
+               mutex_lock(&gpu->lock);
+               etnaviv_buffer_end(gpu);
+@@ -1574,8 +1579,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+                */
+               etnaviv_gpu_wait_idle(gpu, 100);
+ 
+-              etnaviv_iommu_context_put(gpu->mmu_context);
+-              gpu->mmu_context = NULL;
++              gpu->fe_running = false;
+       }
+ 
+       gpu->exec_state = -1;
+@@ -1723,6 +1727,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
+       etnaviv_gpu_hw_suspend(gpu);
+ #endif
+ 
++      if (gpu->mmu_context)
++              etnaviv_iommu_context_put(gpu->mmu_context);
++
+       if (gpu->initialized) {
+               etnaviv_cmdbuf_free(&gpu->buffer);
+               etnaviv_iommu_global_fini(gpu);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+index 8ea48697d1321..1c75c8ed5bcea 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+@@ -101,6 +101,7 @@ struct etnaviv_gpu {
+       struct workqueue_struct *wq;
+       struct drm_gpu_scheduler sched;
+       bool initialized;
++      bool fe_running;
+ 
+       /* 'ring'-buffer: */
+       struct etnaviv_cmdbuf buffer;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+index 1a7c89a67bea3..afe5dd6a9925b 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+       struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+       u32 pgtable;
+ 
++      if (gpu->mmu_context)
++              etnaviv_iommu_context_put(gpu->mmu_context);
++      gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+       /* set base addresses */
+      gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+      gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+index f8bf488e9d717..d664ae29ae209 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+       if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+               return;
+ 
++      if (gpu->mmu_context)
++              etnaviv_iommu_context_put(gpu->mmu_context);
++      gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+       prefetch = etnaviv_buffer_config_mmuv2(gpu,
+                               (u32)v2_context->mtlb_dma,
+                               (u32)context->global->bad_page_dma);
+@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+      if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+               return;
+ 
++      if (gpu->mmu_context)
++              etnaviv_iommu_context_put(gpu->mmu_context);
++      gpu->mmu_context = etnaviv_iommu_context_get(context);
++
+       gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+                 lower_32_bits(context->global->v2.pta_dma));
+       gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+index 15d9fa3879e5d..984569a59a90a 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+@@ -197,6 +197,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
+                */
+               list_for_each_entry_safe(m, n, &list, scan_node) {
+                       etnaviv_iommu_remove_mapping(context, m);
++                      etnaviv_iommu_context_put(m->context);
+                       m->context = NULL;
+                       list_del_init(&m->mmu_node);
+                       list_del_init(&m->scan_node);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+index d1d6902fd13be..e4a0b7d09c2ea 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
+ struct etnaviv_iommu_context *
+ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+                          struct etnaviv_cmdbuf_suballoc *suballoc);
+-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
++static inline struct etnaviv_iommu_context *
++etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+ {
+       kref_get(&ctx->refcount);
++      return ctx;
+ }
+ void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+ void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 6802d9b65f828..dec54c70e0082 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -1122,7 +1122,7 @@ static int cdn_dp_suspend(struct device *dev)
+       return ret;
+ }
+ 
+-static int cdn_dp_resume(struct device *dev)
++static __maybe_unused int cdn_dp_resume(struct device *dev)
+ {
+       struct cdn_dp_device *dp = dev_get_drvdata(dev);
+ 
+diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
+index a3bac9da8cbbc..4cea63a4cab73 100644
+--- a/drivers/mfd/ab8500-core.c
++++ b/drivers/mfd/ab8500-core.c
+@@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+               if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
+                       line += 1;
+ 
+-              handle_nested_irq(irq_create_mapping(ab8500->domain, line));
++              handle_nested_irq(irq_find_mapping(ab8500->domain, line));
+       }
+ 
+       return 0;
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index aa59496e43768..9db1000944c34 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
+ 
+ static const struct regmap_range axp288_volatile_ranges[] = {
+       regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
++      regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
+       regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
+       regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
+       regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
+       regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
+       regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
+-      regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
++      regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
+       regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
+       regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
+ };
+diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
+index a5983d515db03..8d5f8f07d8a66 100644
+--- a/drivers/mfd/db8500-prcmu.c
++++ b/drivers/mfd/db8500-prcmu.c
+@@ -1622,22 +1622,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
+ }
+ 
+ static const unsigned long db8500_armss_freqs[] = {
+-      200000000,
+-      400000000,
+-      800000000,
++      199680000,
++      399360000,
++      798720000,
+       998400000
+ };
+ 
+ /* The DB8520 has slightly higher ARMSS max frequency */
+ static const unsigned long db8520_armss_freqs[] = {
+-      200000000,
+-      400000000,
+-      800000000,
++      199680000,
++      399360000,
++      798720000,
+       1152000000
+ };
+ 
+-
+-
+ static long round_armss_rate(unsigned long rate)
+ {
+       unsigned long freq = 0;
+diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
+index f27eb8dabc1c8..9ab9adce06fdd 100644
+--- a/drivers/mfd/lpc_sch.c
++++ b/drivers/mfd/lpc_sch.c
+@@ -22,13 +22,10 @@
+ #define SMBASE                0x40
+ #define SMBUS_IO_SIZE 64
+ 
+-#define GPIOBASE      0x44
++#define GPIO_BASE     0x44
+ #define GPIO_IO_SIZE  64
+ #define GPIO_IO_SIZE_CENTERTON        128
+ 
+-/* Intel Quark X1000 GPIO IRQ Number */
+-#define GPIO_IRQ_QUARK_X1000  9
+-
+ #define WDTBASE               0x84
+ #define WDT_IO_SIZE   64
+ 
+@@ -43,30 +40,25 @@ struct lpc_sch_info {
+       unsigned int io_size_smbus;
+       unsigned int io_size_gpio;
+       unsigned int io_size_wdt;
+-      int irq_gpio;
+ };
+ 
+ static struct lpc_sch_info sch_chipset_info[] = {
+       [LPC_SCH] = {
+               .io_size_smbus = SMBUS_IO_SIZE,
+               .io_size_gpio = GPIO_IO_SIZE,
+-              .irq_gpio = -1,
+       },
+       [LPC_ITC] = {
+               .io_size_smbus = SMBUS_IO_SIZE,
+               .io_size_gpio = GPIO_IO_SIZE,
+               .io_size_wdt = WDT_IO_SIZE,
+-              .irq_gpio = -1,
+       },
+       [LPC_CENTERTON] = {
+               .io_size_smbus = SMBUS_IO_SIZE,
+               .io_size_gpio = GPIO_IO_SIZE_CENTERTON,
+               .io_size_wdt = WDT_IO_SIZE,
+-              .irq_gpio = -1,
+       },
+       [LPC_QUARK_X1000] = {
+               .io_size_gpio = GPIO_IO_SIZE,
+-              .irq_gpio = GPIO_IRQ_QUARK_X1000,
+               .io_size_wdt = WDT_IO_SIZE,
+       },
+ };
+@@ -113,13 +105,13 @@ static int lpc_sch_get_io(struct pci_dev *pdev, int where, const char *name,
+ }
+ 
+ static int lpc_sch_populate_cell(struct pci_dev *pdev, int where,
+-                               const char *name, int size, int irq,
+-                               int id, struct mfd_cell *cell)
++                               const char *name, int size, int id,
++                               struct mfd_cell *cell)
+ {
+       struct resource *res;
+       int ret;
+ 
+-      res = devm_kcalloc(&pdev->dev, 2, sizeof(*res), GFP_KERNEL);
++      res = devm_kzalloc(&pdev->dev, sizeof(*res), GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+ 
+@@ -135,18 +127,6 @@ static int lpc_sch_populate_cell(struct pci_dev *pdev, int where,
+       cell->ignore_resource_conflicts = true;
+       cell->id = id;
+ 
+-      /* Check if we need to add an IRQ resource */
+-      if (irq < 0)
+-              return 0;
+-
+-      res++;
+-
+-      res->start = irq;
+-      res->end = irq;
+-      res->flags = IORESOURCE_IRQ;
+-
+-      cell->num_resources++;
+-
+       return 0;
+ }
+ 
+@@ -158,15 +138,15 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
+       int ret;
+ 
+       ret = lpc_sch_populate_cell(dev, SMBASE, "isch_smbus",
+-                                  info->io_size_smbus, -1,
++                                  info->io_size_smbus,
+                                   id->device, &lpc_sch_cells[cells]);
+       if (ret < 0)
+               return ret;
+       if (ret == 0)
+               cells++;
+ 
+-      ret = lpc_sch_populate_cell(dev, GPIOBASE, "sch_gpio",
+-                                  info->io_size_gpio, info->irq_gpio,
++      ret = lpc_sch_populate_cell(dev, GPIO_BASE, "sch_gpio",
++                                  info->io_size_gpio,
+                                   id->device, &lpc_sch_cells[cells]);
+       if (ret < 0)
+               return ret;
+@@ -174,7 +154,7 @@ static int lpc_sch_probe(struct pci_dev *dev, const struct pci_device_id *id)
+               cells++;
+ 
+       ret = lpc_sch_populate_cell(dev, WDTBASE, "ie6xx_wdt",
+-                                  info->io_size_wdt, -1,
++                                  info->io_size_wdt,
+                                   id->device, &lpc_sch_cells[cells]);
+       if (ret < 0)
+               return ret;
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 1aee3b3253fc9..508349399f8af 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -1091,7 +1091,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
+ 
+       if (variant->id_val == STMPE801_ID ||
+           variant->id_val == STMPE1600_ID) {
+-              int base = irq_create_mapping(stmpe->domain, 0);
++              int base = irq_find_mapping(stmpe->domain, 0);
+ 
+               handle_nested_irq(base);
+               return IRQ_HANDLED;
+@@ -1119,7 +1119,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
+               while (status) {
+                       int bit = __ffs(status);
+                       int line = bank * 8 + bit;
+-                      int nestedirq = irq_create_mapping(stmpe->domain, line);
++                      int nestedirq = irq_find_mapping(stmpe->domain, line);
+ 
+                       handle_nested_irq(nestedirq);
+                       status &= ~(1 << bit);
+diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
+index 7882a37ffc352..5c2d5a6a6da9c 100644
+--- a/drivers/mfd/tc3589x.c
++++ b/drivers/mfd/tc3589x.c
+@@ -187,7 +187,7 @@ again:
+ 
+       while (status) {
+               int bit = __ffs(status);
+-              int virq = irq_create_mapping(tc3589x->domain, bit);
++              int virq = irq_find_mapping(tc3589x->domain, bit);
+ 
+               handle_nested_irq(virq);
+               status &= ~(1 << bit);
+diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
+index ddddf08b6a4cc..732013f40e4e8 100644
+--- a/drivers/mfd/tqmx86.c
++++ b/drivers/mfd/tqmx86.c
+@@ -209,6 +209,8 @@ static int tqmx86_probe(struct platform_device *pdev)
+ 
+               /* Assumes the IRQ resource is first. */
+               tqmx_gpio_resources[0].start = gpio_irq;
++      } else {
++              tqmx_gpio_resources[0].flags = 0;
+       }
+ 
+       ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
+diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
+index 6c3a619e26286..651a028bc519a 100644
+--- a/drivers/mfd/wm8994-irq.c
++++ b/drivers/mfd/wm8994-irq.c
+@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
+       struct wm8994 *wm8994 = data;
+ 
+       while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
+-              handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
++              handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
+ 
+       return IRQ_HANDLED;
+ }
+diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
+index 6e4d0017c0bd4..f685a581df481 100644
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -641,6 +641,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],      /* subdevices to c
+       int i;
+       size_t size;
+       struct mtd_concat *concat;
++      struct mtd_info *subdev_master = NULL;
+       uint32_t max_erasesize, curr_erasesize;
+       int num_erase_region;
+       int max_writebufsize = 0;
+@@ -679,18 +680,24 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],    /* subdevices to c
+       concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+       concat->mtd.oobsize = subdev[0]->oobsize;
+       concat->mtd.oobavail = subdev[0]->oobavail;
+-      if (subdev[0]->_writev)
++
++      subdev_master = mtd_get_master(subdev[0]);
++      if (subdev_master->_writev)
+               concat->mtd._writev = concat_writev;
+-      if (subdev[0]->_read_oob)
++      if (subdev_master->_read_oob)
+               concat->mtd._read_oob = concat_read_oob;
+-      if (subdev[0]->_write_oob)
++      if (subdev_master->_write_oob)
+               concat->mtd._write_oob = concat_write_oob;
+-      if (subdev[0]->_block_isbad)
++      if (subdev_master->_block_isbad)
+               concat->mtd._block_isbad = concat_block_isbad;
+-      if (subdev[0]->_block_markbad)
++      if (subdev_master->_block_markbad)
+               concat->mtd._block_markbad = concat_block_markbad;
+-      if (subdev[0]->_panic_write)
++      if (subdev_master->_panic_write)
+               concat->mtd._panic_write = concat_panic_write;
++      if (subdev_master->_read)
++              concat->mtd._read = concat_read;
++      if (subdev_master->_write)
++              concat->mtd._write = concat_write;
+ 
+       concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+ 
+@@ -721,14 +728,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],    /* subdevices to c
+                                   subdev[i]->flags & MTD_WRITEABLE;
+               }
+ 
++              subdev_master = mtd_get_master(subdev[i]);
+               concat->mtd.size += subdev[i]->size;
+               concat->mtd.ecc_stats.badblocks +=
+                       subdev[i]->ecc_stats.badblocks;
+               if (concat->mtd.writesize   !=  subdev[i]->writesize ||
+                   concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+                   concat->mtd.oobsize    !=  subdev[i]->oobsize ||
+-                  !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
+-                  !concat->mtd._write_oob != !subdev[i]->_write_oob) {
++                  !concat->mtd._read_oob  != !subdev_master->_read_oob ||
++                  !concat->mtd._write_oob != !subdev_master->_write_oob) {
++                      /*
++                       * Check against subdev[i] for data members, because
++                       * subdev's attributes may be different from master
++                       * mtd device. Check against subdev's master mtd
++                       * device for callbacks, because the existence of
++                       * subdev's callbacks is decided by master mtd device.
++                       */
+                       kfree(concat);
+                       printk("Incompatible OOB or ECC data on \"%s\"\n",
+                              subdev[i]->name);
+@@ -744,8 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],      /* subdevices to c
+       concat->mtd.name = name;
+ 
+       concat->mtd._erase = concat_erase;
+-      concat->mtd._read = concat_read;
+-      concat->mtd._write = concat_write;
+       concat->mtd._sync = concat_sync;
+       concat->mtd._lock = concat_lock;
+       concat->mtd._unlock = concat_unlock;
+diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
+index 2b94f385a1a88..04502d22efc9c 100644
+--- a/drivers/mtd/nand/raw/cafe_nand.c
++++ b/drivers/mtd/nand/raw/cafe_nand.c
+@@ -751,7 +751,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+                         "CAFE NAND", mtd);
+       if (err) {
+               dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+-              goto out_ior;
++              goto out_free_rs;
+       }
+ 
+       /* Disable master reset, enable NAND clock */
+@@ -795,6 +795,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
+       /* Disable NAND IRQ in global IRQ mask register */
+      cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+       free_irq(pdev->irq, mtd);
++ out_free_rs:
++      free_rs(cafe->rs);
+  out_ior:
+       pci_iounmap(pdev, cafe->mmio);
+  out_free_mtd:
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 52100d4fe5a25..d3b37cebcfde8 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1083,7 +1083,7 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
+       u8 reg, val, off;
+ 
+       /* Override the port settings */
+-      if (port == dev->cpu_port) {
++      if (port == dev->imp_port) {
+               off = B53_PORT_OVERRIDE_CTRL;
+               val = PORT_OVERRIDE_EN;
+       } else {
+@@ -1107,7 +1107,7 @@ static void b53_force_port_config(struct b53_device *dev, int port,
+       u8 reg, val, off;
+ 
+       /* Override the port settings */
+-      if (port == dev->cpu_port) {
++      if (port == dev->imp_port) {
+               off = B53_PORT_OVERRIDE_CTRL;
+               val = PORT_OVERRIDE_EN;
+       } else {
+@@ -1175,7 +1175,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
+       b53_force_link(dev, port, phydev->link);
+ 
+       if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
+-              if (port == 8)
++              if (port == dev->imp_port)
+                       off = B53_RGMII_CTRL_IMP;
+               else
+                       off = B53_RGMII_CTRL_P(port);
+@@ -2238,6 +2238,7 @@ struct b53_chip_data {
+       const char *dev_name;
+       u16 vlans;
+       u16 enabled_ports;
++      u8 imp_port;
+       u8 cpu_port;
+       u8 vta_regs[3];
+       u8 arl_bins;
+@@ -2262,6 +2263,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 2,
+               .arl_buckets = 1024,
++              .imp_port = 5,
+               .cpu_port = B53_CPU_PORT_25,
+               .duplex_reg = B53_DUPLEX_STAT_FE,
+       },
+@@ -2272,6 +2274,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 2,
+               .arl_buckets = 1024,
++              .imp_port = 5,
+               .cpu_port = B53_CPU_PORT_25,
+               .duplex_reg = B53_DUPLEX_STAT_FE,
+       },
+@@ -2282,6 +2285,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2295,6 +2299,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2308,6 +2313,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS_9798,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2321,6 +2327,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x7f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS_9798,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2335,6 +2342,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .arl_bins = 4,
+               .arl_buckets = 1024,
+               .vta_regs = B53_VTA_REGS,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+               .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+@@ -2347,6 +2355,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0xff,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2360,6 +2369,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1ff,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2373,6 +2383,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0, /* pdata must provide them */
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS_63XX,
+               .duplex_reg = B53_DUPLEX_STAT_63XX,
+@@ -2386,6 +2397,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2399,6 +2411,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1bf,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2412,6 +2425,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1bf,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2425,6 +2439,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2438,6 +2453,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1f,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2451,6 +2467,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1ff,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2464,6 +2481,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x103,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2477,6 +2495,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1ff,
+               .arl_bins = 4,
+               .arl_buckets = 1024,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2490,6 +2509,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .enabled_ports = 0x1ff,
+               .arl_bins = 4,
+               .arl_buckets = 256,
++              .imp_port = 8,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+@@ -2515,6 +2535,7 @@ static int b53_switch_init(struct b53_device *dev)
+                       dev->vta_regs[1] = chip->vta_regs[1];
+                       dev->vta_regs[2] = chip->vta_regs[2];
+                       dev->jumbo_pm_reg = chip->jumbo_pm_reg;
++                      dev->imp_port = chip->imp_port;
+                       dev->cpu_port = chip->cpu_port;
+                       dev->num_vlans = chip->vlans;
+                       dev->num_arl_bins = chip->arl_bins;
+@@ -2556,9 +2577,10 @@ static int b53_switch_init(struct b53_device *dev)
+                       dev->cpu_port = 5;
+       }
+ 
+-      /* cpu port is always last */
+-      dev->num_ports = dev->cpu_port + 1;
+       dev->enabled_ports |= BIT(dev->cpu_port);
++      dev->num_ports = fls(dev->enabled_ports);
++
++      dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
+ 
+       /* Include non standard CPU port built-in PHYs to be probed */
+       if (is539x(dev) || is531x5(dev)) {
+@@ -2604,7 +2626,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
+               return NULL;
+ 
+       ds->dev = base;
+-      ds->num_ports = DSA_MAX_PORTS;
+ 
+       dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index 7c67409bb186d..bdb2ade7ad622 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -122,6 +122,7 @@ struct b53_device {
+ 
+       /* used ports mask */
+       u16 enabled_ports;
++      unsigned int imp_port;
+       unsigned int cpu_port;
+ 
+       /* connect specific data */
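
The b53 hunks above replace the "cpu port is always last" assumption with an explicit imp_port per chip and derive num_ports from fls() of the enabled-port mask, so the count covers the highest enabled bit even when the IMP port index sits above the CPU port. A minimal userspace sketch of that computation, with GCC/Clang's __builtin_clz standing in for the kernel's fls() (the mask values are hypothetical):

    #include <stdio.h>

    /* Analog of the kernel's fls(): position of the most significant set
     * bit, counting from 1; 0 when no bit is set. */
    static int fls32(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int enabled_ports = 0x1f; /* user ports 0-4 */
        unsigned int cpu_port = 8;         /* IMP port above them */

        enabled_ports |= 1u << cpu_port;
        printf("num_ports = %d\n", fls32(enabled_ports)); /* prints 9 */
        return 0;
    }
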
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 510324916e916..690e9d9495e75 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -38,7 +38,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+       unsigned int port, count = 0;
+ 
+-      for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
++      for (port = 0; port < ds->num_ports; port++) {
+               if (dsa_is_cpu_port(ds, port))
+                       continue;
+               if (priv->port_sts[port].enabled)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+index 9108b497b3c99..03eb0179ec008 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+@@ -1225,7 +1225,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ 
+       /* SR-IOV capability was enabled but there are no VFs*/
+       if (iov->total == 0) {
+-              err = -EINVAL;
++              err = 0;
+               goto failed;
+       }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 849ae99a955a3..26179e437bbfd 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -272,6 +272,7 @@ static const u16 bnxt_async_events_arr[] = {
+       ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
+       ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
+       ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
++      ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
+       ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ };
+ 
+@@ -1304,8 +1305,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+       } else {
+               tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+               tpa_info->gso_type = 0;
+-              if (netif_msg_rx_err(bp))
+-                      netdev_warn(bp->dev, "TPA packet without valid hash\n");
++              netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
+       }
+       tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+       tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+@@ -2081,10 +2081,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
+                       goto async_event_process_exit;
+               set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
+               break;
+-      case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+-              if (netif_msg_hw(bp))
+-                      netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
+-                                  data1, data2);
++      case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
++              char *fatal_str = "non-fatal";
++
+               if (!bp->fw_health)
+                       goto async_event_process_exit;
+ 
+@@ -2096,42 +2095,57 @@ static int bnxt_async_event_process(struct bnxt *bp,
+               if (!bp->fw_reset_max_dsecs)
+                       bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
+               if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+-                      netdev_warn(bp->dev, "Firmware fatal reset event received\n");
++                      fatal_str = "fatal";
+                       set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+-              } else {
+-                      netdev_warn(bp->dev, "Firmware non-fatal reset event 
received, max wait time %d msec\n",
+-                                  bp->fw_reset_max_dsecs * 100);
+               }
++              netif_warn(bp, hw, bp->dev,
++                         "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
++                         fatal_str, data1, data2,
++                         bp->fw_reset_min_dsecs * 100,
++                         bp->fw_reset_max_dsecs * 100);
+               set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
+               break;
++      }
+       case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
+               struct bnxt_fw_health *fw_health = bp->fw_health;
+ 
+               if (!fw_health)
+                       goto async_event_process_exit;
+ 
+-              fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
+-              fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+-              if (!fw_health->enabled)
++              if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
++                      fw_health->enabled = false;
++                      netif_info(bp, drv, bp->dev,
++                                 "Error recovery info: error recovery[0]\n");
+                       break;
+-
+-              if (netif_msg_drv(bp))
+-                      netdev_info(bp->dev, "Error recovery info: error 
recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
+-                                  fw_health->enabled, fw_health->master,
+-                                  bnxt_fw_health_readl(bp,
+-                                                       BNXT_FW_RESET_CNT_REG),
+-                                  bnxt_fw_health_readl(bp,
+-                                                       BNXT_FW_HEALTH_REG));
++              }
++              fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+               fw_health->tmr_multiplier =
+                       DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+                                    bp->current_interval * 10);
+               fw_health->tmr_counter = fw_health->tmr_multiplier;
+-              fw_health->last_fw_heartbeat =
+-                      bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
++              if (!fw_health->enabled)
++                      fw_health->last_fw_heartbeat =
++                              bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+               fw_health->last_fw_reset_cnt =
+                       bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
++              netif_info(bp, drv, bp->dev,
++                         "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
++                         fw_health->master, fw_health->last_fw_reset_cnt,
++                         bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
++              if (!fw_health->enabled) {
++                      /* Make sure tmr_counter is set and visible to
++                       * bnxt_health_check() before setting enabled to true.
++                       */
++                      smp_wmb();
++                      fw_health->enabled = true;
++              }
+               goto async_event_process_exit;
+       }
++      case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
++              netif_notice(bp, hw, bp->dev,
++                           "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
++                           data1, data2);
++              goto async_event_process_exit;
+       case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
+               struct bnxt_rx_ring_info *rxr;
+               u16 grp_idx;
+@@ -2591,6 +2605,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
+               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+               int j;
+ 
++              if (!txr->tx_buf_ring)
++                      continue;
++
+               for (j = 0; j < max_idx;) {
+                       struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+                       struct sk_buff *skb;
+@@ -2675,6 +2692,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+       }
+ 
+ skip_rx_tpa_free:
++      if (!rxr->rx_buf_ring)
++              goto skip_rx_buf_free;
++
+       for (i = 0; i < max_idx; i++) {
+               struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+               dma_addr_t mapping = rx_buf->mapping;
+@@ -2697,6 +2717,11 @@ skip_rx_tpa_free:
+                       kfree(data);
+               }
+       }
++
++skip_rx_buf_free:
++      if (!rxr->rx_agg_ring)
++              goto skip_rx_agg_free;
++
+       for (i = 0; i < max_agg_idx; i++) {
+               struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+               struct page *page = rx_agg_buf->page;
+@@ -2713,6 +2738,8 @@ skip_rx_tpa_free:
+ 
+               __free_page(page);
+       }
++
++skip_rx_agg_free:
+       if (rxr->rx_page) {
+               __free_page(rxr->rx_page);
+               rxr->rx_page = NULL;
+@@ -10719,6 +10746,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
+       if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+               return;
+ 
++      /* Make sure it is enabled before checking the tmr_counter. */
++      smp_rmb();
+       if (fw_health->tmr_counter) {
+               fw_health->tmr_counter--;
+               return;
+@@ -11623,6 +11652,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
+                       dev_close(bp->dev);
+               }
+ 
++              if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
++                  bp->fw_health->enabled) {
++                      bp->fw_health->last_fw_reset_cnt =
++                              bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
++              }
+               bp->fw_reset_state = 0;
+               /* Make sure fw_reset_state is 0 before clearing the flag */
+               smp_mb__before_atomic();
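
The RESET_NOTIFY and ERROR_RECOVERY hunks pair an smp_wmb() in the event handler with an smp_rmb() in bnxt_fw_health_check(): tmr_counter must be written before fw_health->enabled becomes observable, and read only after enabled reads true. A self-contained C11 sketch of the same publish/consume ordering, with release/acquire atomics as userspace stand-ins for the kernel barriers:

    #include <stdatomic.h>
    #include <stdio.h>

    static int tmr_counter;
    static atomic_bool enabled;

    /* Writer, like the async-event handler: initialize tmr_counter,
     * then publish enabled with release semantics (smp_wmb() analog). */
    static void fw_health_enable(int counter)
    {
        tmr_counter = counter;
        atomic_store_explicit(&enabled, true, memory_order_release);
    }

    /* Reader, like bnxt_fw_health_check(): the acquire load pairs with
     * the release store (smp_rmb() analog), so tmr_counter is valid. */
    static void fw_health_check(void)
    {
        if (!atomic_load_explicit(&enabled, memory_order_acquire))
            return;
        printf("tmr_counter = %d\n", tmr_counter);
    }

    int main(void)
    {
        fw_health_enable(5);
        fw_health_check();
        return 0;
    }
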
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 8b0e916afe6b1..e2fd625fc6d20 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -452,7 +452,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+               return rc;
+ 
+       ver_resp = &bp->ver_resp;
+-      sprintf(buf, "%X", ver_resp->chip_rev);
++      sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+       rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+                             DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+       if (rc)
+@@ -474,8 +474,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+       if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+               u32 ver = nvm_cfg_ver.vu32;
+ 
+-              sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
+-                      ver & 0xf);
++              sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
++                      ver & 0xff);
+               rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+                                     DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+                                     buf);
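
The devlink hunk reports the ASIC revision as a letter/number pair ('A' + chip_rev followed by chip_metal) instead of a bare hex value, and widens each PSID component mask from 4 to 8 bits. A trivial sketch of the new revision encoding (the register values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int chip_rev = 1, chip_metal = 0; /* hypothetical */
        char buf[16];

        /* chip_rev 1, chip_metal 0 encodes as "B0" */
        snprintf(buf, sizeof(buf), "%c%d", 'A' + chip_rev, chip_metal);
        printf("asic.rev = %s\n", buf);
        return 0;
    }
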
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 5e4429b14b8ca..2186706cf9130 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -1870,9 +1870,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+ {
+       struct bnxt_flower_indr_block_cb_priv *cb_priv;
+ 
+-      /* All callback list access should be protected by RTNL. */
+-      ASSERT_RTNL();
+-
+       list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+               if (cb_priv->tunnel_netdev == netdev)
+                       return cb_priv;
+diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+index 0e4a0f413960a..c6db85fe16291 100644
+--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
++++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+@@ -1153,6 +1153,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+       if (!adapter->registered_device_map) {
+               pr_err("%s: could not register any net devices\n",
+                      pci_name(pdev));
++              err = -EINVAL;
+               goto out_release_adapter_res;
+       }
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 92ca3b21968fe..936b9cfe1a62f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -60,6 +60,7 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
+ #define HNS3_OUTER_VLAN_TAG   2
+ 
+ #define HNS3_MIN_TX_LEN               33U
++#define HNS3_MIN_TUN_PKT_LEN  65U
+ 
+ /* hns3_pci_tbl - PCI Device ID Table
+  *
+@@ -913,8 +914,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+                              l4.tcp->doff);
+               break;
+       case IPPROTO_UDP:
+-              if (hns3_tunnel_csum_bug(skb))
+-                      return skb_checksum_help(skb);
++              if (hns3_tunnel_csum_bug(skb)) {
++                      int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
++
++                      return ret ? ret : skb_checksum_help(skb);
++              }
+ 
+               hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+               hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
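
For the UDP tunnel checksum workaround, the hns3 hunk pads the frame to HNS3_MIN_TUN_PKT_LEN via skb_put_padto() before falling back to skb_checksum_help(); checksumming first would leave the pad bytes outside the computed sum. A userspace sketch of the pad-then-checksum ordering (buffer layout and checksum are illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MIN_TUN_PKT_LEN 65u

    /* Zero-pad up to a minimum length, as skb_put_padto() does. */
    static size_t pad_to(uint8_t *buf, size_t len, size_t min_len)
    {
        if (len < min_len) {
            memset(buf + len, 0, min_len - len);
            len = min_len;
        }
        return len;
    }

    /* Toy checksum standing in for the software checksum fallback. */
    static uint32_t checksum(const uint8_t *buf, size_t len)
    {
        uint32_t sum = 0;
        while (len--)
            sum += *buf++;
        return sum;
    }

    int main(void)
    {
        uint8_t frame[128] = { 0xde, 0xad, 0xbe, 0xef };
        size_t len = pad_to(frame, 4, MIN_TUN_PKT_LEN); /* pad first */

        printf("len=%zu csum=%u\n", len, checksum(frame, len));
        return 0;
    }
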
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 2261de5caf863..59ec538eba1f0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1463,9 +1463,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+ 
+ static int hclge_configure(struct hclge_dev *hdev)
+ {
++      const struct cpumask *cpumask = cpu_online_mask;
+       struct hclge_cfg cfg;
+       unsigned int i;
+-      int ret;
++      int node, ret;
+ 
+       ret = hclge_get_cfg(hdev, &cfg);
+       if (ret)
+@@ -1526,11 +1527,12 @@ static int hclge_configure(struct hclge_dev *hdev)
+ 
+       hclge_init_kdump_kernel_config(hdev);
+ 
+-      /* Set the init affinity based on pci func number */
+-      i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+-      i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+-      cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+-                      &hdev->affinity_mask);
++      /* Set the affinity based on numa node */
++      node = dev_to_node(&hdev->pdev->dev);
++      if (node != NUMA_NO_NODE)
++              cpumask = cpumask_of_node(node);
++
++      cpumask_copy(&hdev->affinity_mask, cpumask);
+ 
+       return ret;
+ }
+@@ -7003,11 +7005,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+       hclge_clear_arfs_rules(handle);
+       spin_unlock_bh(&hdev->fd_rule_lock);
+ 
+-      /* If it is not PF reset, the firmware will disable the MAC,
++      /* If it is not PF reset or FLR, the firmware will disable the MAC,
+        * so it only need to stop phy here.
+        */
+       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+-          hdev->reset_type != HNAE3_FUNC_RESET) {
++          hdev->reset_type != HNAE3_FUNC_RESET &&
++          hdev->reset_type != HNAE3_FLR_RESET) {
+               hclge_mac_stop_phy(hdev);
+               hclge_update_link_status(hdev);
+               return;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index d3010d5ab3665..447457cacf973 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2352,6 +2352,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ 
+       hclgevf_enable_vector(&hdev->misc_vector, false);
+       event_cause = hclgevf_check_evt_cause(hdev, &clearval);
++      if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
++              hclgevf_clear_event_cause(hdev, clearval);
+ 
+       switch (event_cause) {
+       case HCLGEVF_VECTOR0_EVENT_RST:
+@@ -2364,10 +2366,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+               break;
+       }
+ 
+-      if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+-              hclgevf_clear_event_cause(hdev, clearval);
++      if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+               hclgevf_enable_vector(&hdev->misc_vector, true);
+-      }
+ 
+       return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3134c1988db36..bb8d0a0f48ee0 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4478,6 +4478,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+               return 0;
+       }
+ 
++      if (adapter->failover_pending) {
++              adapter->init_done_rc = -EAGAIN;
++              netdev_dbg(netdev, "Failover pending, ignoring login response\n");
++              complete(&adapter->init_done);
++              /* login response buffer will be released on reset */
++              return 0;
++      }
++
+       netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ 
+       netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 644d28b0692b3..c26652436c53a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -84,7 +84,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
+  */
+ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+ {
+-      unsigned long timeout = jiffies + usecs_to_jiffies(10000);
++      unsigned long timeout = jiffies + usecs_to_jiffies(20000);
++      bool twice = false;
+       void __iomem *reg;
+       u64 reg_val;
+ 
+@@ -99,6 +100,15 @@ again:
+               usleep_range(1, 5);
+               goto again;
+       }
++      /* In scenarios where CPU is scheduled out before checking
++       * 'time_before' (above) and gets scheduled in such that
++       * jiffies are beyond timeout value, then check again if HW is
++       * done with the operation in the meantime.
++       */
++      if (!twice) {
++              twice = true;
++              goto again;
++      }
+       return -EBUSY;
+ }
+ 
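
rvu_poll_reg() now takes one more look at the register after the deadline expires: if the polling thread was scheduled out long enough for jiffies to pass the timeout, the hardware may have finished in the meantime and the extra read avoids a spurious -EBUSY. A standalone sketch of the check-once-more-after-timeout idiom (hw_done() is a hypothetical stand-in for the register read):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for reading the hardware status. */
    static bool hw_done(void)
    {
        return true;
    }

    /* Check the status, then the clock; once the deadline has passed,
     * take exactly one final look before giving up. */
    static int poll_with_recheck(clock_t budget)
    {
        clock_t deadline = clock() + budget;
        bool twice = false;

    again:
        if (hw_done())
            return 0;
        if (clock() < deadline)
            goto again;
        if (!twice) {
            twice = true;
            goto again;
        }
        return -1; /* the kernel code returns -EBUSY here */
    }

    int main(void)
    {
        printf("%d\n", poll_with_recheck(CLOCKS_PER_SEC / 100));
        return 0;
    }
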
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 3dfcb20e97c6f..857be86b4a11a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
+       err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
+       if (err) {
+               mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
+-              return err;
++              goto err_cancel_work;
+       }
+ 
+       err = mlx5_fw_tracer_create_mkey(tracer);
+@@ -1031,6 +1031,7 @@ err_notifier_unregister:
+       mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ err_dealloc_pd:
+       mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
++err_cancel_work:
+       cancel_work_sync(&tracer->read_fw_strings_work);
+       return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index e6f782743fbe8..2fdea05eec1de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -298,9 +298,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ {
+       struct mlx5e_rep_indr_block_priv *cb_priv;
+ 
+-      /* All callback list access should be protected by RTNL. */
+-      ASSERT_RTNL();
+-
+       list_for_each_entry(cb_priv,
+                           &rpriv->uplink_priv.tc_indr_block_priv_list,
+                           list)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 1d4b4e6f6fb41..0ff034b0866e2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1675,14 +1675,13 @@ static int build_match_list(struct match_list *match_head,
+ 
+               curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+               if (!curr_match) {
++                      rcu_read_unlock();
+                       free_match_list(match_head, ft_locked);
+-                      err = -ENOMEM;
+-                      goto out;
++                      return -ENOMEM;
+               }
+               curr_match->g = g;
+               list_add_tail(&curr_match->list, &match_head->list);
+       }
+-out:
+       rcu_read_unlock();
+       return err;
+ }
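
The fs_core hunk makes build_match_list() drop the RCU read lock directly on the allocation-failure path instead of jumping over it to a shared exit label. A small sketch of the release-before-early-return shape, with a pthread rwlock as a userspace stand-in for rcu_read_lock()/rcu_read_unlock():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Release the read-side lock on the failure path before returning. */
    static int build_list(size_t n)
    {
        pthread_rwlock_rdlock(&lock);
        void *curr_match = malloc(n);
        if (!curr_match) {
            pthread_rwlock_unlock(&lock); /* unlock, then bail out */
            return -1;                    /* -ENOMEM in the kernel code */
        }
        /* ... link curr_match into the list under the lock ... */
        free(curr_match);
        pthread_rwlock_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", build_list(16));
        return 0;
    }
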
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index e95969c462e46..3f34e6da72958 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1732,9 +1732,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+       struct nfp_flower_indr_block_cb_priv *cb_priv;
+       struct nfp_flower_priv *priv = app->priv;
+ 
+-      /* All callback list access should be protected by RTNL. */
+-      ASSERT_RTNL();
+-
+       list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+               if (cb_priv->netdev == netdev)
+                       return cb_priv;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index caeef25c89bb1..2cd14ee95c1ff 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -3376,6 +3376,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+                         struct qed_nvm_image_att *p_image_att)
+ {
+       enum nvm_image_type type;
++      int rc;
+       u32 i;
+ 
+       /* Translate image_id into MFW definitions */
+@@ -3404,7 +3405,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+               return -EINVAL;
+       }
+ 
+-      qed_mcp_nvm_info_populate(p_hwfn);
++      rc = qed_mcp_nvm_info_populate(p_hwfn);
++      if (rc)
++              return rc;
++
+       for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+               if (type == p_hwfn->nvm_info.image_att[i].image_type)
+                       break;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+index e6784023bce42..aa7ee43f92525 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+@@ -439,7 +439,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+       QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+       msleep(20);
+ 
+-      qlcnic_rom_unlock(adapter);
+       /* big hammer don't reset CAM block on reset */
+       QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ 
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
+index 7c74318620b1d..ccdfa930130bc 100644
+--- a/drivers/net/ethernet/rdc/r6040.c
++++ b/drivers/net/ethernet/rdc/r6040.c
+@@ -119,6 +119,8 @@
+ #define PHY_ST                0x8A    /* PHY status register */
+ #define MAC_SM                0xAC    /* MAC status machine */
+ #define  MAC_SM_RST   0x0002  /* MAC status machine reset */
++#define MD_CSC                0xb6    /* MDC speed control register */
++#define  MD_CSC_DEFAULT       0x0030
+ #define MAC_ID                0xBE    /* Identifier register */
+ 
+ #define TX_DCNT               0x80    /* TX descriptor count */
+@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
+ {
+       void __iomem *ioaddr = lp->base;
+       int limit = MAC_DEF_TIMEOUT;
+-      u16 cmd;
++      u16 cmd, md_csc;
+ 
++      md_csc = ioread16(ioaddr + MD_CSC);
+       iowrite16(MAC_RST, ioaddr + MCR1);
+       while (limit--) {
+               cmd = ioread16(ioaddr + MCR1);
+@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
+       iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
+       iowrite16(0, ioaddr + MAC_SM);
+       mdelay(5);
++
++      /* Restore MDIO clock frequency */
++      if (md_csc != MD_CSC_DEFAULT)
++              iowrite16(md_csc, ioaddr + MD_CSC);
+ }
+ 
+ static void r6040_init_mac_regs(struct net_device *dev)
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 5cab2d3c00236..8927d59977458 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
+       else
+               txdesc->status |= cpu_to_le32(TD_TACT);
+ 
++      wmb(); /* cur_tx must be incremented after TACT bit was set */
+       mdp->cur_tx++;
+ 
+       if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index b3790aa952a15..0747866d60abc 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -451,7 +451,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+        * table region determines the number of entries it has.
+        */
+       if (filter) {
+-              count = hweight32(ipa->filter_map);
++              /* Include one extra "slot" to hold the filter map itself */
++              count = 1 + hweight32(ipa->filter_map);
+               hash_count = hash_mem->size ? count : 0;
+       } else {
+               count = mem->size / IPA_TABLE_ENTRY_SIZE;
+diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
+index 21aa24c741b96..daae7fa58fb82 100644
+--- a/drivers/net/phy/dp83640_reg.h
++++ b/drivers/net/phy/dp83640_reg.h
+@@ -5,7 +5,7 @@
+ #ifndef HAVE_DP83640_REGISTERS
+ #define HAVE_DP83640_REGISTERS
+ 
+-#define PAGE0                     0x0000
++/* #define PAGE0                  0x0000 */
+ #define PHYCR2                    0x001c /* PHY Control Register 2 */
+ 
+ #define PAGE4                     0x0004
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index eb100eb33de3d..77ac5a721e7b6 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -653,6 +653,11 @@ static const struct usb_device_id mbim_devs[] = {
+         .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+       },
+ 
++      /* Telit LN920 */
++      { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++        .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
++      },
++
+       /* default entry */
+       { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 5b3aff2c279f7..f269337c82c58 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2537,13 +2537,17 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+       if (!hso_net->mux_bulk_tx_buf)
+               goto err_free_tx_urb;
+ 
+-      add_net_device(hso_dev);
++      result = add_net_device(hso_dev);
++      if (result) {
++              dev_err(&interface->dev, "Failed to add net device\n");
++              goto err_free_tx_buf;
++      }
+ 
+       /* registering our net device */
+       result = register_netdev(net);
+       if (result) {
+               dev_err(&interface->dev, "Failed to register device\n");
+-              goto err_free_tx_buf;
++              goto err_rmv_ndev;
+       }
+ 
+       hso_log_port(hso_dev);
+@@ -2552,8 +2556,9 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
+ 
+       return hso_dev;
+ 
+-err_free_tx_buf:
++err_rmv_ndev:
+       remove_net_device(hso_dev);
++err_free_tx_buf:
+       kfree(hso_net->mux_bulk_tx_buf);
+ err_free_tx_urb:
+       usb_free_urb(hso_net->mux_bulk_tx_urb);
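
The hso hunk checks the previously ignored add_net_device() return value and reorders the error labels so each one undoes exactly the steps that succeeded, in reverse. A compilable sketch of that goto-unwind shape (add_device() and remove_device() are hypothetical stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    static int add_device(void) { return 0; }
    static void remove_device(void) { }

    static int create_device(int register_fails)
    {
        char *tx_buf = malloc(64);
        if (!tx_buf)
            return -1;

        if (add_device())
            goto err_free_tx_buf;

        if (register_fails) /* register_netdev() analog */
            goto err_rmv_ndev;

        free(tx_buf); /* freed here only to keep the sketch leak-free */
        return 0;

    err_rmv_ndev:
        remove_device();
    err_free_tx_buf:
        free(tx_buf);
        return -1;
    }

    int main(void)
    {
        printf("ok=%d fail=%d\n", create_device(0), create_device(1));
        return 0;
    }
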
+diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
+index 7095ecd6223a7..4e18e08776c98 100644
+--- a/drivers/ntb/test/ntb_msi_test.c
++++ b/drivers/ntb/test/ntb_msi_test.c
+@@ -369,8 +369,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
+       if (ret)
+               goto remove_dbgfs;
+ 
+-      if (!nm->isr_ctx)
++      if (!nm->isr_ctx) {
++              ret = -ENOMEM;
+               goto remove_dbgfs;
++      }
+ 
+       ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ 
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index 89df1350fefd8..65e1e5cf1b29a 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -598,6 +598,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
+               return -ENOMEM;
+       }
+       if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
++              ret = -EINVAL;
+               dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
+               goto err_free_inbuf;
+       }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index c9a925999c6ea..a6b3b07627630 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -273,6 +273,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+       } while (ret > 0);
+ }
+ 
++static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
++{
++      return !list_empty(&queue->send_list) ||
++              !llist_empty(&queue->req_list) || queue->more_requests;
++}
++
+ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+               bool sync, bool last)
+ {
+@@ -293,9 +299,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+               nvme_tcp_send_all(queue);
+               queue->more_requests = false;
+               mutex_unlock(&queue->send_mutex);
+-      } else if (last) {
+-              queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+       }
++
++      if (last && nvme_tcp_queue_more(queue))
++              queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
+ 
+ static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+@@ -890,12 +897,6 @@ done:
+       read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+-{
+-      return !list_empty(&queue->send_list) ||
+-              !llist_empty(&queue->req_list) || queue->more_requests;
+-}
+-
+ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
+ {
+       queue->request = NULL;
+@@ -1132,8 +1133,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
+                               pending = true;
+                       else if (unlikely(result < 0))
+                               break;
+-              } else
+-                      pending = !llist_empty(&queue->req_list);
++              }
+ 
+               result = nvme_tcp_try_recv(queue);
+               if (result > 0)
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index d34ca0fda0f66..8a6d68e13f301 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -25,6 +25,7 @@
+ #define STATUS_REG_SYS_2      0x508
+ #define STATUS_CLR_REG_SYS_2  0x708
+ #define LINK_DOWN             BIT(1)
++#define J7200_LINK_DOWN               BIT(10)
+ 
+ #define J721E_PCIE_USER_CMD_STATUS    0x4
+ #define LINK_TRAINING_ENABLE          BIT(0)
+@@ -54,6 +55,7 @@ struct j721e_pcie {
+       struct cdns_pcie        *cdns_pcie;
+       void __iomem            *user_cfg_base;
+       void __iomem            *intd_cfg_base;
++      u32                     linkdown_irq_regfield;
+ };
+ 
+ enum j721e_pcie_mode {
+@@ -63,7 +65,10 @@ enum j721e_pcie_mode {
+ 
+ struct j721e_pcie_data {
+       enum j721e_pcie_mode    mode;
+-      bool quirk_retrain_flag;
++      unsigned int            quirk_retrain_flag:1;
++      unsigned int            quirk_detect_quiet_flag:1;
++      u32                     linkdown_irq_regfield;
++      unsigned int            byte_access_allowed:1;
+ };
+ 
+ static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+@@ -95,12 +100,12 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
+       u32 reg;
+ 
+       reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
+-      if (!(reg & LINK_DOWN))
++      if (!(reg & pcie->linkdown_irq_regfield))
+               return IRQ_NONE;
+ 
+       dev_err(dev, "LINK DOWN!\n");
+ 
+-      j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
++      j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, pcie->linkdown_irq_regfield);
+       return IRQ_HANDLED;
+ }
+ 
+@@ -109,7 +114,7 @@ static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
+       u32 reg;
+ 
+       reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
+-      reg |= LINK_DOWN;
++      reg |= pcie->linkdown_irq_regfield;
+       j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
+ }
+ 
+@@ -272,10 +277,36 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
+ static const struct j721e_pcie_data j721e_pcie_rc_data = {
+       .mode = PCI_MODE_RC,
+       .quirk_retrain_flag = true,
++      .byte_access_allowed = false,
++      .linkdown_irq_regfield = LINK_DOWN,
+ };
+ 
+ static const struct j721e_pcie_data j721e_pcie_ep_data = {
+       .mode = PCI_MODE_EP,
++      .linkdown_irq_regfield = LINK_DOWN,
++};
++
++static const struct j721e_pcie_data j7200_pcie_rc_data = {
++      .mode = PCI_MODE_RC,
++      .quirk_detect_quiet_flag = true,
++      .linkdown_irq_regfield = J7200_LINK_DOWN,
++      .byte_access_allowed = true,
++};
++
++static const struct j721e_pcie_data j7200_pcie_ep_data = {
++      .mode = PCI_MODE_EP,
++      .quirk_detect_quiet_flag = true,
++};
++
++static const struct j721e_pcie_data am64_pcie_rc_data = {
++      .mode = PCI_MODE_RC,
++      .linkdown_irq_regfield = J7200_LINK_DOWN,
++      .byte_access_allowed = true,
++};
++
++static const struct j721e_pcie_data am64_pcie_ep_data = {
++      .mode = PCI_MODE_EP,
++      .linkdown_irq_regfield = J7200_LINK_DOWN,
+ };
+ 
+ static const struct of_device_id of_j721e_pcie_match[] = {
+@@ -287,6 +318,22 @@ static const struct of_device_id of_j721e_pcie_match[] = {
+               .compatible = "ti,j721e-pcie-ep",
+               .data = &j721e_pcie_ep_data,
+       },
++      {
++              .compatible = "ti,j7200-pcie-host",
++              .data = &j7200_pcie_rc_data,
++      },
++      {
++              .compatible = "ti,j7200-pcie-ep",
++              .data = &j7200_pcie_ep_data,
++      },
++      {
++              .compatible = "ti,am64-pcie-host",
++              .data = &am64_pcie_rc_data,
++      },
++      {
++              .compatible = "ti,am64-pcie-ep",
++              .data = &am64_pcie_ep_data,
++      },
+       {},
+ };
+ 
+@@ -319,6 +366,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 
+       pcie->dev = dev;
+       pcie->mode = mode;
++      pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
+ 
+       base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
+       if (IS_ERR(base))
+@@ -378,9 +426,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+                       goto err_get_sync;
+               }
+ 
+-              bridge->ops = &cdns_ti_pcie_host_ops;
++              if (!data->byte_access_allowed)
++                      bridge->ops = &cdns_ti_pcie_host_ops;
+               rc = pci_host_bridge_priv(bridge);
+               rc->quirk_retrain_flag = data->quirk_retrain_flag;
++              rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ 
+               cdns_pcie = &rc->pcie;
+               cdns_pcie->dev = dev;
+@@ -430,6 +480,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+                       ret = -ENOMEM;
+                       goto err_get_sync;
+               }
++              ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+ 
+               cdns_pcie = &ep->pcie;
+               cdns_pcie->dev = dev;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+index 84cc58dc8512c..1af14474abcf1 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
+@@ -578,6 +578,10 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+       ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+       /* Reserve region 0 for IRQs */
+       set_bit(0, &ep->ob_region_map);
++
++      if (ep->quirk_detect_quiet_flag)
++              cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
++
+       spin_lock_init(&ep->lock);
+ 
+       return 0;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 73dcf8cf98fbf..a40ed9e12b4bb 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -497,6 +497,9 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+               return PTR_ERR(rc->cfg_base);
+       rc->cfg_res = res;
+ 
++      if (rc->quirk_detect_quiet_flag)
++              cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
++
+       ret = cdns_pcie_start_link(pcie);
+       if (ret) {
+               dev_err(dev, "Failed to start link\n");
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
+index 3c3646502d05c..52767f26048fd 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.c
++++ b/drivers/pci/controller/cadence/pcie-cadence.c
+@@ -7,6 +7,22 @@
+ 
+ #include "pcie-cadence.h"
+ 
++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
++{
++      u32 delay = 0x3;
++      u32 ltssm_control_cap;
++
++      /*
++       * Set the LTSSM Detect Quiet state min. delay to 2ms.
++       */
++      ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
++      ltssm_control_cap = ((ltssm_control_cap &
++                          ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
++                          CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
++
++      cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
++}
++
+ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+                                  u32 r, bool is_io,
+                                  u64 cpu_addr, u64 pci_addr, size_t size)
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
+index 6705a5fedfbb0..e0b59730bffb7 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.h
++++ b/drivers/pci/controller/cadence/pcie-cadence.h
+@@ -189,6 +189,14 @@
+ /* AXI link down register */
+ #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+ 
++/* LTSSM Capabilities register */
++#define CDNS_PCIE_LTSSM_CONTROL_CAP             (CDNS_PCIE_LM_BASE + 0x0054)
++#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK  GENMASK(2, 1)
++#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
++#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
++       (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
++       CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
++
+ enum cdns_pcie_rp_bar {
+       RP_BAR_UNDEFINED = -1,
+       RP_BAR0,
+@@ -291,6 +299,7 @@ struct cdns_pcie {
+  * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and       RP_NO_BAR if it's free or
+  *                available
+  * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+  */
+ struct cdns_pcie_rc {
+       struct cdns_pcie        pcie;
+@@ -299,7 +308,8 @@ struct cdns_pcie_rc {
+       u32                     vendor_id;
+       u32                     device_id;
+       bool                    avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+-      bool                    quirk_retrain_flag;
++      unsigned int            quirk_retrain_flag:1;
++      unsigned int            quirk_detect_quiet_flag:1;
+ };
+ 
+ /**
+@@ -330,6 +340,7 @@ struct cdns_pcie_epf {
+  *        registers fields (RMW) accessible by both remote RC and EP to
+  *        minimize time between read and write
+  * @epf: Structure to hold info about endpoint function
++ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+  */
+ struct cdns_pcie_ep {
+       struct cdns_pcie        pcie;
+@@ -344,6 +355,7 @@ struct cdns_pcie_ep {
+       /* protect writing to PCI_STATUS while raising legacy interrupts */
+       spinlock_t              lock;
+       struct cdns_pcie_epf    *epf;
++      unsigned int            quirk_detect_quiet_flag:1;
+ };
+ 
+ 
+@@ -504,6 +516,9 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+       return 0;
+ }
+ #endif
++
++void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
++
+ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+                                  u32 r, bool is_io,
+                                  u64 cpu_addr, u64 pci_addr, size_t size);
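
cdns_pcie_detect_quiet_min_delay_set() is a plain read-modify-write of a two-bit LTSSM capability field through GENMASK()-style mask and shift macros. A userspace sketch of the same field update (the starting register value is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace analog of the kernel's GENMASK() for 32-bit fields. */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    #define DETECT_QUIET_MIN_DELAY_MASK  GENMASK(2, 1)
    #define DETECT_QUIET_MIN_DELAY_SHIFT 1
    #define DETECT_QUIET_MIN_DELAY(d) \
        (((d) << DETECT_QUIET_MIN_DELAY_SHIFT) & DETECT_QUIET_MIN_DELAY_MASK)

    int main(void)
    {
        uint32_t reg = 0x10;  /* invented capability register value */
        uint32_t delay = 0x3; /* encoding for the 2 ms minimum delay */

        /* Clear the two-bit field, then or in the new encoding. */
        reg = (reg & ~DETECT_QUIET_MIN_DELAY_MASK) |
              DETECT_QUIET_MIN_DELAY(delay);
        printf("reg = 0x%08x\n", reg); /* 0x16: bits 2..1 hold 0b11 */
        return 0;
    }
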
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 506f6a294eac3..a5b677ec07690 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -515,19 +515,19 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+       struct tegra_pcie_dw *pcie = arg;
+       struct dw_pcie_ep *ep = &pcie->pci.ep;
+       int spurious = 1;
+-      u32 val, tmp;
++      u32 status_l0, status_l1, link_status;
+ 
+-      val = appl_readl(pcie, APPL_INTR_STATUS_L0);
+-      if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+-              val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+-              appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
++      status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
++      if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
++              status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
++              appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ 
+-              if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
++              if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+                       pex_ep_event_hot_rst_done(pcie);
+ 
+-              if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+-                      tmp = appl_readl(pcie, APPL_LINK_STATUS);
+-                      if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
++              if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
++                      link_status = appl_readl(pcie, APPL_LINK_STATUS);
++                      if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+                               dev_dbg(pcie->dev, "Link is up with Host\n");
+                               dw_pcie_ep_linkup(ep);
+                       }
+@@ -536,11 +536,11 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+               spurious = 0;
+       }
+ 
+-      if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+-              val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+-              appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
++      if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
++              status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
++              appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+ 
+-              if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
++              if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+                       return IRQ_WAKE_THREAD;
+ 
+               spurious = 0;
+@@ -548,8 +548,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+ 
+       if (spurious) {
+               dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
+-                       val);
+-              appl_writel(pcie, val, APPL_INTR_STATUS_L0);
++                       status_l0);
++              appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+       }
+ 
+       return IRQ_HANDLED;
+@@ -1778,7 +1778,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+       val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+       val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+       dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+-      val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
++      val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+       dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+ 
+       ret = dw_pcie_ep_init_complete(ep);
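
The MSI-X hunk writes upper_32_bits() of the match address into the HIGH register, where the code previously wrote the low half a second time; with the old code, a buffer above 4 GiB could never match. A sketch of the 64-bit split (the address is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Analogs of the kernel's lower_32_bits()/upper_32_bits() helpers. */
    #define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))
    #define upper_32_bits(n) ((uint32_t)((n) >> 32))

    int main(void)
    {
        uint64_t msi_mem_phys = 0x0000004512345678ull; /* invented */

        /* The old code put the LOW value into the HIGH register too,
         * dropping the 0x45 and breaking matching above 4 GiB. */
        printf("LOW  = 0x%08x\n", lower_32_bits(msi_mem_phys));
        printf("HIGH = 0x%08x\n", upper_32_bits(msi_mem_phys));
        return 0;
    }
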
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 1a2af963599ca..b4eb75f25906e 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2160,13 +2160,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+               rp->np = port;
+ 
+               rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
+-              if (IS_ERR(rp->base))
+-                      return PTR_ERR(rp->base);
++              if (IS_ERR(rp->base)) {
++                      err = PTR_ERR(rp->base);
++                      goto err_node_put;
++              }
+ 
+               label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+               if (!label) {
+-                      dev_err(dev, "failed to create reset GPIO label\n");
+-                      return -ENOMEM;
++                      err = -ENOMEM;
++                      goto err_node_put;
+               }
+ 
+               /*
+@@ -2184,7 +2186,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+                       } else {
+                               dev_err(dev, "failed to get reset GPIO: %ld\n",
+                                       PTR_ERR(rp->reset_gpio));
+-                              return PTR_ERR(rp->reset_gpio);
++                              err = PTR_ERR(rp->reset_gpio);
++                              goto err_node_put;
+                       }
+               }
+ 
+diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
+index 56b8ee7bf3307..f918c713afb08 100644
+--- a/drivers/pci/controller/pcie-iproc-bcma.c
++++ b/drivers/pci/controller/pcie-iproc-bcma.c
+@@ -35,7 +35,6 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+ {
+       struct device *dev = &bdev->dev;
+       struct iproc_pcie *pcie;
+-      LIST_HEAD(resources);
+       struct pci_host_bridge *bridge;
+       int ret;
+ 
+@@ -60,19 +59,16 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+       pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+       pcie->mem.name = "PCIe MEM space";
+       pcie->mem.flags = IORESOURCE_MEM;
+-      pci_add_resource(&resources, &pcie->mem);
++      pci_add_resource(&bridge->windows, &pcie->mem);
++      ret = devm_request_pci_bus_resources(dev, &bridge->windows);
++      if (ret)
++              return ret;
+ 
+       pcie->map_irq = iproc_pcie_bcma_map_irq;
+ 
+-      ret = iproc_pcie_setup(pcie, &resources);
+-      if (ret) {
+-              dev_err(dev, "PCIe controller setup failed\n");
+-              pci_free_resource_list(&resources);
+-              return ret;
+-      }
+-
+       bcma_set_drvdata(bdev, pcie);
+-      return 0;
++
++      return iproc_pcie_setup(pcie, &bridge->windows);
+ }
+ 
+ static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
+index b4a288e24aafb..c91d85b151290 100644
+--- a/drivers/pci/controller/pcie-rcar-ep.c
++++ b/drivers/pci/controller/pcie-rcar-ep.c
+@@ -492,9 +492,9 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
+       pcie->dev = dev;
+ 
+       pm_runtime_enable(dev);
+-      err = pm_runtime_get_sync(dev);
++      err = pm_runtime_resume_and_get(dev);
+       if (err < 0) {
+-              dev_err(dev, "pm_runtime_get_sync failed\n");
++              dev_err(dev, "pm_runtime_resume_and_get failed\n");
+               goto err_pm_disable;
+       }
+ 
+diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
+index a32070be5adf9..cc6194aa24c15 100644
+--- a/drivers/pci/hotplug/TODO
++++ b/drivers/pci/hotplug/TODO
+@@ -40,9 +40,6 @@ ibmphp:
+ 
+ * The return value of pci_hp_register() is not checked.
+ 
+-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
+-  and once more in the error path of its caller ibmphp_access_ebda().
+-
+ * The various slot data structures are difficult to follow and need to be
+   simplified.  A lot of functions are too large and too complex, they need
+   to be broken up into smaller, manageable pieces.  Negative examples are
+diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
+index 11a2661dc0627..7fb75401ad8a7 100644
+--- a/drivers/pci/hotplug/ibmphp_ebda.c
++++ b/drivers/pci/hotplug/ibmphp_ebda.c
+@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
+               /* init hpc structure */
+               hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
+               if (!hpc_ptr) {
+-                      rc = -ENOMEM;
+-                      goto error_no_hpc;
++                      return -ENOMEM;
+               }
+               hpc_ptr->ctlr_id = ctlr_id;
+               hpc_ptr->ctlr_relative_id = ctlr;
+@@ -910,8 +909,6 @@ error:
+       kfree(tmp_slot);
+ error_no_slot:
+       free_ebda_hpc(hpc_ptr);
+-error_no_hpc:
+-      iounmap(io_mem);
+       return rc;
+ }
+ 
+diff --git a/drivers/pci/of.c b/drivers/pci/of.c
+index ac24cd5439a93..3f6ef2f45e57a 100644
+--- a/drivers/pci/of.c
++++ b/drivers/pci/of.c
+@@ -295,7 +295,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
+       /* Check for ranges property */
+       err = of_pci_range_parser_init(&parser, dev_node);
+       if (err)
+-              goto failed;
++              return 0;
+ 
+       dev_dbg(dev, "Parsing ranges property...\n");
+       for_each_of_pci_range(&parser, &range) {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index eae6a9fdd33d4..0d7109018a91f 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -265,7 +265,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
+ 
+       *endptr = strchrnul(path, ';');
+ 
+-      wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
++      wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
+       if (!wpath)
+               return -ENOMEM;
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index bad294c352519..5d2acebc3e966 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4626,6 +4626,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
+               PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+ 
++/*
++ * Each of these NXP Root Ports is in a Root Complex with a unique segment
++ * number and does provide isolation features to disable peer transactions
++ * and validate bus numbers in requests, but does not provide an ACS
++ * capability.
++ */
++static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
++{
++      return pci_acs_ctrl_enabled(acs_flags,
++              PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
++}
++
+ static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
+ {
+       if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+@@ -4852,6 +4864,10 @@ static const struct pci_dev_acs_enabled {
+       { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
+       /* Cavium ThunderX */
+       { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
++      /* Cavium multi-function devices */
++      { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
++      { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
++      { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
+       /* APM X-Gene */
+       { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
+       /* Ampere Computing */
+@@ -4872,6 +4888,39 @@ static const struct pci_dev_acs_enabled {
+       { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
+       { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
+       { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
++      /* NXP root ports, xx=16, 12, or 08 cores */
++      /* LX2xx0A : without security features + CAN-FD */
++      { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
++      /* LX2xx0C : security features + CAN-FD */
++      { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
++      /* LX2xx0E : security features + CAN */
++      { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
++      /* LX2xx0N : without security features + CAN */
++      { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
++      /* LX2xx2A : without security features + CAN-FD */
++      { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
++      /* LX2xx2C : security features + CAN-FD */
++      { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
++      /* LX2xx2E : security features + CAN */
++      { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
++      /* LX2xx2N : without security features + CAN */
++      { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
++      { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
+       /* Zhaoxin Root/Downstream Ports */
+       { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
+       { 0 }
+@@ -5346,7 +5395,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+                             PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+ 
+ /*
+- * Create device link for NVIDIA GPU with integrated USB xHCI Host
++ * Create device link for GPUs with integrated USB xHCI Host
+  * controller to VGA.
+  */
+ static void quirk_gpu_usb(struct pci_dev *usb)
+@@ -5355,9 +5404,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
+ }
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
++                            PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+ 
+ /*
+- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
++ * Create device link for GPUs with integrated Type-C UCSI controller
+  * to VGA. Currently there is no class code defined for UCSI device over PCI
+  * so using UNKNOWN class for now and it will be updated when UCSI
+  * over PCI gets a class code.
+@@ -5370,6 +5421,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_UNKNOWN, 8,
+                             quirk_gpu_usb_typec_ucsi);
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
++                            PCI_CLASS_SERIAL_UNKNOWN, 8,
++                            quirk_gpu_usb_typec_ucsi);
+ 
+ /*
+  * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index cc5e84b80c699..faa3a4b8ed91d 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -40,13 +40,14 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
+       sclp.has_gisaf = !!(sccb->fac118 & 0x08);
+       sclp.has_hvs = !!(sccb->fac119 & 0x80);
+       sclp.has_kss = !!(sccb->fac98 & 0x01);
+-      sclp.has_sipl = !!(sccb->cbl & 0x4000);
+       if (sccb->fac85 & 0x02)
+               S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+       if (sccb->fac91 & 0x40)
+               S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
+       if (sccb->cpuoff > 134)
+               sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
++      if (sccb->cpuoff > 137)
++              sclp.has_sipl = !!(sccb->cbl & 0x4000);
+       sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+       sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+       sclp.rzm <<= 20;
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index c8784dfafdd73..da02c3e96e7b2 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -466,7 +466,7 @@ static void vhost_tx_batch(struct vhost_net *net,
+               .num = nvq->batched_xdp,
+               .ptr = nvq->xdp,
+       };
+-      int err;
++      int i, err;
+ 
+       if (nvq->batched_xdp == 0)
+               goto signal_used;
+@@ -475,6 +475,15 @@ static void vhost_tx_batch(struct vhost_net *net,
+       err = sock->ops->sendmsg(sock, msghdr, 0);
+       if (unlikely(err < 0)) {
+               vq_err(&nvq->vq, "Fail to batch sending packets\n");
++
+              /* free pages owned by XDP; since this is an unlikely error path,
++               * keep it simple and avoid more complex bulk update for the
++               * used pages
++               */
++              for (i = 0; i < nvq->batched_xdp; ++i)
++                      put_page(virt_to_head_page(nvq->xdp[i].data));
++              nvq->batched_xdp = 0;
++              nvq->done_idx = 0;
+               return;
+       }
+ 
+diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
+index e3fee3f1f5828..9d355fd989d86 100644
+--- a/drivers/video/backlight/ktd253-backlight.c
++++ b/drivers/video/backlight/ktd253-backlight.c
+@@ -25,6 +25,7 @@
+ 
+ #define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
+ #define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
++#define KTD253_T_OFF_CRIT_NS 100000 /* 100 us, now it doesn't look good */
+ #define KTD253_T_OFF_MS 3
+ 
+ struct ktd253_backlight {
+@@ -34,13 +35,50 @@ struct ktd253_backlight {
+       u16 ratio;
+ };
+ 
++static void ktd253_backlight_set_max_ratio(struct ktd253_backlight *ktd253)
++{
++      gpiod_set_value_cansleep(ktd253->gpiod, 1);
++      ndelay(KTD253_T_HIGH_NS);
++      /* We always fall back to this when we power on */
++}
++
++static int ktd253_backlight_stepdown(struct ktd253_backlight *ktd253)
++{
++      /*
++       * These GPIO operations absolutely can NOT sleep so no _cansleep
++       * suffixes, and no using GPIO expanders on slow buses for this!
++       *
++       * The maximum number of cycles of the loop is 32  so the time taken
++       * should nominally be:
++       * (T_LOW_NS + T_HIGH_NS + loop_time) * 32
++       *
++       * Architectures do not always support ndelay() and we will get a few us
++       * instead. If we get to a critical time limit an interrupt has likely
++       * occured in the low part of the loop and we need to restart from the
++       * top so we have the backlight in a known state.
++       */
++      u64 ns;
++
++      ns = ktime_get_ns();
++      gpiod_set_value(ktd253->gpiod, 0);
++      ndelay(KTD253_T_LOW_NS);
++      gpiod_set_value(ktd253->gpiod, 1);
++      ns = ktime_get_ns() - ns;
++      if (ns >= KTD253_T_OFF_CRIT_NS) {
++              dev_err(ktd253->dev, "PCM on backlight took too long (%llu ns)\n", ns);
++              return -EAGAIN;
++      }
++      ndelay(KTD253_T_HIGH_NS);
++      return 0;
++}
++
+ static int ktd253_backlight_update_status(struct backlight_device *bl)
+ {
+       struct ktd253_backlight *ktd253 = bl_get_data(bl);
+       int brightness = backlight_get_brightness(bl);
+       u16 target_ratio;
+       u16 current_ratio = ktd253->ratio;
+-      unsigned long flags;
++      int ret;
+ 
+       dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);
+ 
+@@ -62,37 +100,34 @@ static int ktd253_backlight_update_status(struct backlight_device *bl)
+       }
+ 
+       if (current_ratio == 0) {
+-              gpiod_set_value_cansleep(ktd253->gpiod, 1);
+-              ndelay(KTD253_T_HIGH_NS);
+-              /* We always fall back to this when we power on */
++              ktd253_backlight_set_max_ratio(ktd253);
+               current_ratio = KTD253_MAX_RATIO;
+       }
+ 
+-      /*
+-       * WARNING:
+-       * The loop to set the correct current level is performed
+-       * with interrupts disabled as it is timing critical.
+-       * The maximum number of cycles of the loop is 32
+-       * so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32,
+-       */
+-      local_irq_save(flags);
+       while (current_ratio != target_ratio) {
+               /*
+                * These GPIO operations absolutely can NOT sleep so no
+                * _cansleep suffixes, and no using GPIO expanders on
+                * slow buses for this!
+                */
+-              gpiod_set_value(ktd253->gpiod, 0);
+-              ndelay(KTD253_T_LOW_NS);
+-              gpiod_set_value(ktd253->gpiod, 1);
+-              ndelay(KTD253_T_HIGH_NS);
+-              /* After 1/32 we loop back to 32/32 */
+-              if (current_ratio == KTD253_MIN_RATIO)
++              ret = ktd253_backlight_stepdown(ktd253);
++              if (ret == -EAGAIN) {
++                      /*
++                       * Something disturbed the backlight setting code when
++                       * running so we need to bring the PWM back to a known
++                       * state. This shouldn't happen too much.
++                       */
++                      gpiod_set_value_cansleep(ktd253->gpiod, 0);
++                      msleep(KTD253_T_OFF_MS);
++                      ktd253_backlight_set_max_ratio(ktd253);
++                      current_ratio = KTD253_MAX_RATIO;
++              } else if (current_ratio == KTD253_MIN_RATIO) {
++                      /* After 1/32 we loop back to 32/32 */
+                       current_ratio = KTD253_MAX_RATIO;
+-              else
++              } else {
+                       current_ratio--;
++              }
+       }
+-      local_irq_restore(flags);
+       ktd253->ratio = current_ratio;
+ 
+       dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 2946f3a63110c..2ee017442dfcd 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1164,7 +1164,10 @@ int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
+ 
+       wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
+ 
+-      return __watchdog_ping(wdd);
++      if (watchdog_hw_running(wdd) && handle_boot_enabled)
++              return __watchdog_ping(wdd);
++
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index e025cd8f3f071..ef7df2141f34f 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3019,6 +3019,29 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+        */
+       fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
+ 
++      /*
++       * Flag our filesystem as having big metadata blocks if they are bigger
++       * than the page size
++       */
++      if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
++              if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
++                      btrfs_info(fs_info,
++                              "flagging fs with big metadata feature");
++              features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
++      }
++
++      /* Set up fs_info before parsing mount options */
++      nodesize = btrfs_super_nodesize(disk_super);
++      sectorsize = btrfs_super_sectorsize(disk_super);
++      stripesize = sectorsize;
++      fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
++      fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
++
++      /* Cache block sizes */
++      fs_info->nodesize = nodesize;
++      fs_info->sectorsize = sectorsize;
++      fs_info->stripesize = stripesize;
++
+       ret = btrfs_parse_options(fs_info, options, sb->s_flags);
+       if (ret) {
+               err = ret;
+@@ -3045,28 +3068,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
+       if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+               btrfs_info(fs_info, "has skinny extents");
+ 
+-      /*
+-       * flag our filesystem as having big metadata blocks if
+-       * they are bigger than the page size
+-       */
+-      if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
+-              if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+-                      btrfs_info(fs_info,
+-                              "flagging fs with big metadata feature");
+-              features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+-      }
+-
+-      nodesize = btrfs_super_nodesize(disk_super);
+-      sectorsize = btrfs_super_sectorsize(disk_super);
+-      stripesize = sectorsize;
+-      fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
+-      fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+-
+-      /* Cache block sizes */
+-      fs_info->nodesize = nodesize;
+-      fs_info->sectorsize = sectorsize;
+-      fs_info->stripesize = stripesize;
+-
+       /*
+        * mixed block groups end up with duplicate but slightly offset
+        * extent buffers for the same range.  It leads to corruptions
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 4140d5c3ab5a5..f943eea9fe4e1 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -288,10 +288,10 @@ void fuse_request_end(struct fuse_req *req)
+ 
+       /*
+        * test_and_set_bit() implies smp_mb() between bit
+-       * changing and below intr_entry check. Pairs with
++       * changing and below FR_INTERRUPTED check. Pairs with
+        * smp_mb() from queue_interrupt().
+        */
+-      if (!list_empty(&req->intr_entry)) {
++      if (test_bit(FR_INTERRUPTED, &req->flags)) {
+               spin_lock(&fiq->lock);
+               list_del_init(&req->intr_entry);
+               spin_unlock(&fiq->lock);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index d0089039fee79..a8d07273ddc05 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -3206,12 +3206,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
+                               ret = nr;
+                       break;
+               }
++              if (!iov_iter_is_bvec(iter)) {
++                      iov_iter_advance(iter, nr);
++              } else {
++                      req->rw.len -= nr;
++                      req->rw.addr += nr;
++              }
+               ret += nr;
+               if (nr != iovec.iov_len)
+                       break;
+-              req->rw.len -= nr;
+-              req->rw.addr += nr;
+-              iov_iter_advance(iter, nr);
+       }
+ 
+       return ret;
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
+index 551093b74596b..1dafc7c7f5cfe 100644
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -359,8 +359,8 @@ extern void sparse_remove_section(struct mem_section *ms,
+               unsigned long map_offset, struct vmem_altmap *altmap);
+ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+                                         unsigned long pnum);
+-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+-              unsigned long nr_pages);
++extern struct zone *zone_for_pfn_range(int online_type, int nid,
++              unsigned long start_pfn, unsigned long nr_pages);
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+ 
+ #endif /* __LINUX_MEMORY_HOTPLUG_H */
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 22207a79762c2..a55097b4d9927 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1713,8 +1713,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { }
+ static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
+ static inline int pci_assign_resource(struct pci_dev *dev, int i)
+ { return -EBUSY; }
+-static inline int __pci_register_driver(struct pci_driver *drv,
+-                                      struct module *owner)
++static inline int __must_check __pci_register_driver(struct pci_driver *drv,
++                                                   struct module *owner,
++                                                   const char *mod_name)
+ { return 0; }
+ static inline int pci_register_driver(struct pci_driver *drv)
+ { return 0; }
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 1ab1e24bcbce5..635a9243cce0d 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2476,7 +2476,8 @@
+ #define PCI_VENDOR_ID_TDI               0x192E
+ #define PCI_DEVICE_ID_TDI_EHCI          0x0101
+ 
+-#define PCI_VENDOR_ID_FREESCALE               0x1957
++#define PCI_VENDOR_ID_FREESCALE               0x1957  /* duplicate: NXP */
++#define PCI_VENDOR_ID_NXP             0x1957  /* duplicate: FREESCALE */
+ #define PCI_DEVICE_ID_MPC8308         0xc006
+ #define PCI_DEVICE_ID_MPC8315E                0x00b4
+ #define PCI_DEVICE_ID_MPC8315         0x00b5
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 2660ee4b08adf..29c7ccd5ae42e 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1354,6 +1354,7 @@ struct task_struct {
+                                       mce_whole_page : 1,
+                                       __mce_reserved : 62;
+       struct callback_head            mce_kill_me;
++      int                             mce_count;
+ #endif
+ 
+       /*
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 0a1239819fd2a..acbf1875ad506 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1908,7 +1908,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
+       WRITE_ONCE(newsk->prev, prev);
+       WRITE_ONCE(next->prev, newsk);
+       WRITE_ONCE(prev->next, newsk);
+-      list->qlen++;
++      WRITE_ONCE(list->qlen, list->qlen + 1);
+ }
+ 
+ static inline void __skb_queue_splice(const struct sk_buff_head *list,
+diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
+index 9e7c2c6078456..69079fbf3ed2d 100644
+--- a/include/uapi/linux/pkt_sched.h
++++ b/include/uapi/linux/pkt_sched.h
+@@ -826,6 +826,8 @@ struct tc_codel_xstats {
+ 
+ /* FQ_CODEL */
+ 
++#define FQ_CODEL_QUANTUM_MAX (1 << 20)
++
+ enum {
+       TCA_FQ_CODEL_UNSPEC,
+       TCA_FQ_CODEL_TARGET,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7e0fdc19043e4..c677f934353af 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9973,7 +9973,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
+               return;
+ 
+       if (ifh->nr_file_filters) {
+-              mm = get_task_mm(event->ctx->task);
++              mm = get_task_mm(task);
+               if (!mm)
+                       goto restart;
+ 
+diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
+index a82f03f385f89..0996d59750ff0 100644
+--- a/kernel/trace/trace_boot.c
++++ b/kernel/trace/trace_boot.c
+@@ -205,12 +205,15 @@ trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
+                       pr_err("Failed to apply filter: %s\n", buf);
+       }
+ 
+-      xbc_node_for_each_array_value(enode, "actions", anode, p) {
+-              if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
+-                      pr_err("action string is too long: %s\n", p);
+-              else if (trigger_process_regex(file, buf) < 0)
+-                      pr_err("Failed to apply an action: %s\n", buf);
+-      }
++      if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
++              xbc_node_for_each_array_value(enode, "actions", anode, p) {
++                      if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
++                              pr_err("action string is too long: %s\n", p);
++                      else if (trigger_process_regex(file, buf) < 0)
++                              pr_err("Failed to apply an action: %s\n", buf);
++              }
++      } else if (xbc_node_find_value(enode, "actions", NULL))
++              pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");
+ 
+       if (xbc_node_find_value(enode, "enable", NULL)) {
+               if (trace_event_enable_disable(file, 1, 0) < 0)
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 68150b9cbde92..552dbc9d52260 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -647,7 +647,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
+       /* Register new event */
+       ret = register_kprobe_event(tk);
+       if (ret) {
+-              pr_warn("Failed to register probe event(%d)\n", ret);
++              if (ret == -EEXIST) {
++                      trace_probe_log_set_index(0);
++                      trace_probe_log_err(0, EVENT_EXIST);
++              } else
++                      pr_warn("Failed to register probe event(%d)\n", ret);
+               goto end;
+       }
+ 
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index d2867ccc6acaa..1d31bc4acf7a5 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -1029,11 +1029,36 @@ error:
+       return ret;
+ }
+ 
++static struct trace_event_call *
++find_trace_event_call(const char *system, const char *event_name)
++{
++      struct trace_event_call *tp_event;
++      const char *name;
++
++      list_for_each_entry(tp_event, &ftrace_events, list) {
++              if (!tp_event->class->system ||
++                  strcmp(system, tp_event->class->system))
++                      continue;
++              name = trace_event_name(tp_event);
++              if (!name || strcmp(event_name, name))
++                      continue;
++              return tp_event;
++      }
++
++      return NULL;
++}
++
+ int trace_probe_register_event_call(struct trace_probe *tp)
+ {
+       struct trace_event_call *call = trace_probe_event_call(tp);
+       int ret;
+ 
++      lockdep_assert_held(&event_mutex);
++
++      if (find_trace_event_call(trace_probe_group_name(tp),
++                                trace_probe_name(tp)))
++              return -EEXIST;
++
+       ret = register_trace_event(&call->event);
+       if (!ret)
+               return -ENODEV;
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index 2f703a20c724c..6d41e20c47ced 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -398,6 +398,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+       C(NO_EVENT_NAME,        "Event name is not specified"),         \
+       C(EVENT_TOO_LONG,       "Event name is too long"),              \
+       C(BAD_EVENT_NAME,       "Event name must follow the same rules as C identifiers"), \
++      C(EVENT_EXIST,          "Given group/event name is already used by another event"), \
+       C(RETVAL_ON_PROBE,      "$retval is not available on probe"),   \
+       C(BAD_STACK_NUM,        "Invalid stack number"),                \
+       C(BAD_ARG_NUM,          "Invalid argument number"),             \
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 3cf7128e1ad30..0dd6e286e5196 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -514,7 +514,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
+ 
+       ret = register_uprobe_event(tu);
+       if (ret) {
+-              pr_warn("Failed to register probe event(%d)\n", ret);
++              if (ret == -EEXIST) {
++                      trace_probe_log_set_index(0);
++                      trace_probe_log_err(0, EVENT_EXIST);
++              } else
++                      pr_warn("Failed to register probe event(%d)\n", ret);
+               goto end;
+       }
+ 
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index b9de2df5b8358..6275b1c05f111 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -765,8 +765,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
+       return movable_node_enabled ? movable_zone : kernel_zone;
+ }
+ 
+-struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+-              unsigned long nr_pages)
++struct zone *zone_for_pfn_range(int online_type, int nid,
++              unsigned long start_pfn, unsigned long nr_pages)
+ {
+       if (online_type == MMOP_ONLINE_KERNEL)
+               return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
+diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
+index 79b6a04d8eb61..42dc080a4dbbc 100644
+--- a/net/caif/chnl_net.c
++++ b/net/caif/chnl_net.c
+@@ -53,20 +53,6 @@ struct chnl_net {
+       enum caif_states state;
+ };
+ 
+-static void robust_list_del(struct list_head *delete_node)
+-{
+-      struct list_head *list_node;
+-      struct list_head *n;
+-      ASSERT_RTNL();
+-      list_for_each_safe(list_node, n, &chnl_net_list) {
+-              if (list_node == delete_node) {
+-                      list_del(list_node);
+-                      return;
+-              }
+-      }
+-      WARN_ON(1);
+-}
+-
+ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+ {
+       struct sk_buff *skb;
+@@ -369,6 +355,7 @@ static int chnl_net_init(struct net_device *dev)
+       ASSERT_RTNL();
+       priv = netdev_priv(dev);
+       strncpy(priv->name, dev->name, sizeof(priv->name));
++      INIT_LIST_HEAD(&priv->list_field);
+       return 0;
+ }
+ 
+@@ -377,7 +364,7 @@ static void chnl_net_uninit(struct net_device *dev)
+       struct chnl_net *priv;
+       ASSERT_RTNL();
+       priv = netdev_priv(dev);
+-      robust_list_del(&priv->list_field);
++      list_del_init(&priv->list_field);
+ }
+ 
+ static const struct net_device_ops netdev_ops = {
+@@ -542,7 +529,7 @@ static void __exit chnl_exit_module(void)
+       rtnl_lock();
+       list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+               dev = list_entry(list_node, struct chnl_net, list_field);
+-              list_del(list_node);
++              list_del_init(list_node);
+               delete_device(dev);
+       }
+       rtnl_unlock();
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
+index c5c74a34d139d..91e7a22026971 100644
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
+               newdp->dccps_role           = DCCP_ROLE_SERVER;
+               newdp->dccps_hc_rx_ackvec   = NULL;
+               newdp->dccps_service_list   = NULL;
++              newdp->dccps_hc_rx_ccid     = NULL;
++              newdp->dccps_hc_tx_ccid     = NULL;
+               newdp->dccps_service        = dreq->dreq_service;
+               newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
+               newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 9281c9c6a253e..65b125bb3b860 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1728,13 +1728,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
+                * use the switch internal MDIO bus instead
+                */
+               ret = dsa_slave_phy_connect(slave_dev, dp->index);
+-              if (ret) {
+-                      netdev_err(slave_dev,
+-                                 "failed to connect to port %d: %d\n",
+-                                 dp->index, ret);
+-                      phylink_destroy(dp->pl);
+-                      return ret;
+-              }
++      }
++      if (ret) {
++              netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
++                         ERR_PTR(ret));
++              phylink_destroy(dp->pl);
+       }
+ 
+       return ret;
+diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
+index e9176475bac89..24375ebd684e8 100644
+--- a/net/dsa/tag_rtl4_a.c
++++ b/net/dsa/tag_rtl4_a.c
+@@ -54,9 +54,10 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+       p = (__be16 *)tag;
+       *p = htons(RTL4_A_ETHERTYPE);
+ 
+-      out = (RTL4_A_PROTOCOL_RTL8366RB << 12) | (2 << 8);
+-      /* The lower bits is the port number */
+-      out |= (u8)dp->index;
++      out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8);
++      /* The lower bits indicate the port number */
++      out |= BIT(dp->index);
++
+       p = (__be16 *)(tag + 2);
+       *p = htons(out);
+ 
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 97b402b2d6fbd..80d2a00d30977 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -906,7 +906,7 @@ static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
+                                                  rule_buf);
+               useraddr += offsetof(struct compat_ethtool_rxnfc, rule_locs);
+       } else {
+-              ret = copy_to_user(useraddr, &rxnfc, size);
++              ret = copy_to_user(useraddr, rxnfc, size);
+               useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
+       }
+ 
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index a0829495b211e..a9cc05043fa47 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -468,8 +468,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
+ {
+-      if (csum && skb_checksum_start(skb) < skb->data)
+-              return -EINVAL;
+      return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+ }
+ 
+@@ -627,15 +625,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+       }
+ 
+       if (dev->header_ops) {
++              const int pull_len = tunnel->hlen + sizeof(struct iphdr);
++
+               if (skb_cow_head(skb, 0))
+                       goto free_skb;
+ 
+               tnl_params = (const struct iphdr *)skb->data;
+ 
++              if (pull_len > skb_transport_offset(skb))
++                      goto free_skb;
++
+               /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
+                * to gre header.
+                */
+-              skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++              skb_pull(skb, pull_len);
+               skb_reset_mac_header(skb);
+       } else {
+               if (skb_cow_head(skb, dev->needed_headroom))
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index f2d313c5900df..1075cc2136ac6 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -1303,6 +1303,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
+               .fc_gw4   = cfg->gw.ipv4,
+               .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
+               .fc_flags = cfg->nh_flags,
++              .fc_nlinfo = cfg->nlinfo,
+               .fc_encap = cfg->nh_encap,
+               .fc_encap_type = cfg->nh_encap_type,
+       };
+@@ -1341,6 +1342,7 @@ static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
+               .fc_ifindex = cfg->nh_ifindex,
+               .fc_gateway = cfg->gw.ipv6,
+               .fc_flags = cfg->nh_flags,
++              .fc_nlinfo = cfg->nlinfo,
+               .fc_encap = cfg->nh_encap,
+               .fc_encap_type = cfg->nh_encap_type,
+               .fc_is_fdb = cfg->nh_fdb,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index ac8d38e044002..991e3434957b8 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1314,7 +1314,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
+       if (dup_sack && (sacked & TCPCB_RETRANS)) {
+               if (tp->undo_marker && tp->undo_retrans > 0 &&
+                   after(end_seq, tp->undo_marker))
+-                      tp->undo_retrans--;
++                      tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
+               if ((sacked & TCPCB_SACKED_ACKED) &&
+                   before(start_seq, state->reord))
+                               state->reord = start_seq;
+diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
+index 0d122edc368dd..b91003538d87a 100644
+--- a/net/ipv4/udp_tunnel_nic.c
++++ b/net/ipv4/udp_tunnel_nic.c
+@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
+ {
+       int err;
+ 
+-      udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
++      udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
+       if (!udp_tunnel_nic_workqueue)
+               return -ENOMEM;
+ 
+diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
+index 6fd54744cbc38..aa5bb8789ba0b 100644
+--- a/net/ipv6/netfilter/nf_socket_ipv6.c
++++ b/net/ipv6/netfilter/nf_socket_ipv6.c
+@@ -99,7 +99,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+ {
+       __be16 dport, sport;
+       const struct in6_addr *daddr = NULL, *saddr = NULL;
+-      struct ipv6hdr *iph = ipv6_hdr(skb);
++      struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
+       struct sk_buff *data_skb = NULL;
+       int doff = 0;
+       int thoff = 0, tproto;
+@@ -129,8 +129,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
+                       thoff + sizeof(*hp);
+ 
+       } else if (tproto == IPPROTO_ICMPV6) {
+-              struct ipv6hdr ipv6_var;
+-
+               if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
+                                        &sport, &dport, &ipv6_var))
+                       return NULL;
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 203890e378cb0..561b6d67ab8b9 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+       }
+ 
+       if (tunnel->version == L2TP_HDR_VER_3 &&
+-          l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
++          l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
++              l2tp_session_dec_refcount(session);
+               goto invalid;
++      }
+ 
+       l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
+       l2tp_session_dec_refcount(session);
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index b3f4a334f9d78..94001eb51ffe4 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -397,6 +397,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+                       msg = "not picking up existing connection ";
+                       goto out_invalid;
+               }
++              break;
+       case CT_DCCP_REQUEST:
+               break;
+       case CT_DCCP_INVALID:
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 2b5f97e1d40b9..c605a3e713e76 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -8394,6 +8394,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
+                                                       data->verdict.chain);
+                               if (err < 0)
+                                       return err;
++                              break;
+                       default:
+                               break;
+                       }
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 70d46e0bbf064..7fcb73ac2e6ed 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -41,6 +41,7 @@ struct nft_ct_helper_obj  {
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+ static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
+ static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
++static DEFINE_MUTEX(nft_ct_pcpu_mutex);
+ #endif
+ 
+ static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c,
+@@ -526,8 +527,11 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+       case NFT_CT_ZONE:
++              mutex_lock(&nft_ct_pcpu_mutex);
+               if (--nft_ct_pcpu_template_refcnt == 0)
+                       nft_ct_tmpl_put_pcpu();
++              mutex_unlock(&nft_ct_pcpu_mutex);
++              break;
+ #endif
+       default:
+               break;
+@@ -564,9 +568,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
+ #endif
+ #ifdef CONFIG_NF_CONNTRACK_ZONES
+       case NFT_CT_ZONE:
+-              if (!nft_ct_tmpl_alloc_pcpu())
++              mutex_lock(&nft_ct_pcpu_mutex);
++              if (!nft_ct_tmpl_alloc_pcpu()) {
++                      mutex_unlock(&nft_ct_pcpu_mutex);
+                       return -ENOMEM;
++              }
+               nft_ct_pcpu_template_refcnt++;
++              mutex_unlock(&nft_ct_pcpu_mutex);
+               len = sizeof(u16);
+               break;
+ #endif
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index bbd5f87536006..99e8db2621984 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -369,6 +369,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ {
+       struct fq_codel_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
++      u32 quantum = 0;
+       int err;
+ 
+       if (!opt)
+@@ -386,6 +387,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+                   q->flows_cnt > 65536)
+                       return -EINVAL;
+       }
++      if (tb[TCA_FQ_CODEL_QUANTUM]) {
++              quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
++              if (quantum > FQ_CODEL_QUANTUM_MAX) {
++                      NL_SET_ERR_MSG(extack, "Invalid quantum");
++                      return -EINVAL;
++              }
++      }
+       sch_tree_lock(sch);
+ 
+       if (tb[TCA_FQ_CODEL_TARGET]) {
+@@ -412,8 +420,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+       if (tb[TCA_FQ_CODEL_ECN])
+               q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+ 
+-      if (tb[TCA_FQ_CODEL_QUANTUM])
+-              q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
++      if (quantum)
++              q->quantum = quantum;
+ 
+       if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
+              q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 963047c57c27b..ce957ee5383c4 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1980,10 +1980,12 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
+               tipc_node_distr_xmit(sock_net(sk), &xmitq);
+       }
+ 
+-      if (!skb_cb->bytes_read)
+-              tsk_advance_rx_queue(sk);
++      if (skb_cb->bytes_read)
++              goto exit;
++
++      tsk_advance_rx_queue(sk);
+ 
+-      if (likely(!connected) || skb_cb->bytes_read)
++      if (likely(!connected))
+               goto exit;
+ 
+       /* Send connection flow control advertisement when applicable */
+@@ -2420,7 +2422,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+                           u32 dport, struct sk_buff_head *xmitq)
+ {
+-      unsigned long time_limit = jiffies + 2;
++      unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
+       struct sk_buff *skb;
+       unsigned int lim;
+       atomic_t *dcnt;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 37ffa7725cee2..d5c0ae34b1e45 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2769,7 +2769,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ 
+               other = unix_peer(sk);
+               if (other && unix_peer(other) != sk &&
+-                  unix_recvq_full(other) &&
++                  unix_recvq_full_lockless(other) &&
+                   unix_dgram_peer_wake_me(sk, other))
+                       writable = 0;
+ 
+diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
+index 8ddb5d099029f..8bf55bb4f515c 100755
+--- a/scripts/clang-tools/gen_compile_commands.py
++++ b/scripts/clang-tools/gen_compile_commands.py
+@@ -13,6 +13,7 @@ import logging
+ import os
+ import re
+ import subprocess
++import sys
+ 
+ _DEFAULT_OUTPUT = 'compile_commands.json'
+ _DEFAULT_LOG_LEVEL = 'WARNING'
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 2abbd75fbf2e3..014b959575cae 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -127,10 +127,10 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+ FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ 
+-FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
+-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
+-FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
+-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
++FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
++FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
++FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
++FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
+ 
+ FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+ 
+diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
+index 280227e3ffd7a..f4ec01da8da68 100644
+--- a/tools/perf/bench/inject-buildid.c
++++ b/tools/perf/bench/inject-buildid.c
+@@ -133,7 +133,7 @@ static u64 dso_map_addr(struct bench_dso *dso)
+       return 0x400000ULL + dso->ino * 8192ULL;
+ }
+ 
+-static u32 synthesize_attr(struct bench_data *data)
++static ssize_t synthesize_attr(struct bench_data *data)
+ {
+       union perf_event event;
+ 
+@@ -151,7 +151,7 @@ static u32 synthesize_attr(struct bench_data *data)
+       return writen(data->input_pipe[1], &event, event.header.size);
+ }
+ 
+-static u32 synthesize_fork(struct bench_data *data)
++static ssize_t synthesize_fork(struct bench_data *data)
+ {
+       union perf_event event;
+ 
+@@ -169,8 +169,7 @@ static u32 synthesize_fork(struct bench_data *data)
+       return writen(data->input_pipe[1], &event, event.header.size);
+ }
+ 
+-static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso,
+-                         u64 timestamp)
++static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
+ {
+       union perf_event event;
+       size_t len = offsetof(struct perf_record_mmap2, filename);
+@@ -198,23 +197,25 @@ static u32 synthesize_mmap(struct bench_data *data, struct bench_dso *dso,
+ 
+       if (len > sizeof(event.mmap2)) {
+               /* write mmap2 event first */
+-              writen(data->input_pipe[1], &event, len - bench_id_hdr_size);
++              if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0)
++                      return -1;
+               /* zero-fill sample id header */
+               memset(id_hdr_ptr, 0, bench_id_hdr_size);
+               /* put timestamp in the right position */
+               ts_idx = (bench_id_hdr_size / sizeof(u64)) - 2;
+               id_hdr_ptr[ts_idx] = timestamp;
+-              writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size);
+-      } else {
+-              ts_idx = (len / sizeof(u64)) - 2;
+-              id_hdr_ptr[ts_idx] = timestamp;
+-              writen(data->input_pipe[1], &event, len);
++              if (writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size) < 0)
++                      return -1;
++
++              return len;
+       }
+-      return len;
++
++      ts_idx = (len / sizeof(u64)) - 2;
++      id_hdr_ptr[ts_idx] = timestamp;
++      return writen(data->input_pipe[1], &event, len);
+ }
+ 
+-static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso,
+-                           u64 timestamp)
++static ssize_t synthesize_sample(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
+ {
+       union perf_event event;
+       struct perf_sample sample = {
+@@ -233,7 +234,7 @@ static u32 synthesize_sample(struct bench_data *data, struct bench_dso *dso,
+       return writen(data->input_pipe[1], &event, event.header.size);
+ }
+ 
+-static u32 synthesize_flush(struct bench_data *data)
++static ssize_t synthesize_flush(struct bench_data *data)
+ {
+       struct perf_event_header header = {
+               .size = sizeof(header),
+@@ -348,14 +349,16 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
+       int status;
+       unsigned int i, k;
+       struct rusage rusage;
+-      u64 len = 0;
+ 
+       /* this makes the child to run */
+       if (perf_header__write_pipe(data->input_pipe[1]) < 0)
+               return -1;
+ 
+-      len += synthesize_attr(data);
+-      len += synthesize_fork(data);
++      if (synthesize_attr(data) < 0)
++              return -1;
++
++      if (synthesize_fork(data) < 0)
++              return -1;
+ 
+       for (i = 0; i < nr_mmaps; i++) {
+               int idx = rand() % (nr_dsos - 1);
+@@ -363,13 +366,18 @@ static int inject_build_id(struct bench_data *data, u64 
*max_rss)
+               u64 timestamp = rand() % 1000000;
+ 
+               pr_debug2("   [%d] injecting: %s\n", i+1, dso->name);
+-              len += synthesize_mmap(data, dso, timestamp);
++              if (synthesize_mmap(data, dso, timestamp) < 0)
++                      return -1;
+ 
+-              for (k = 0; k < nr_samples; k++)
+-                      len += synthesize_sample(data, dso, timestamp + k * 1000);
++              for (k = 0; k < nr_samples; k++) {
++                      if (synthesize_sample(data, dso, timestamp + k * 1000) < 0)
++                              return -1;
++              }
+ 
+-              if ((i + 1) % 10 == 0)
+-                      len += synthesize_flush(data);
++              if ((i + 1) % 10 == 0) {
++                      if (synthesize_flush(data) < 0)
++                              return -1;
++              }
+       }
+ 
+       /* tihs makes the child to finish */
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 74bf480aa4f05..df515cd8d0184 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -2100,6 +2100,7 @@ static int add_callchain_ip(struct thread *thread,
+ 
+       al.filtered = 0;
+       al.sym = NULL;
++      al.srcline = NULL;
+       if (!cpumode) {
+               thread__find_cpumode_addr_location(thread, ip, &al);
+       } else {
+diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh
+index 4254ddc3f70b5..1ef9e4159bba8 100755
+--- a/tools/testing/selftests/net/altnames.sh
++++ b/tools/testing/selftests/net/altnames.sh
+@@ -45,7 +45,7 @@ altnames_test()
+       check_err $? "Got unexpected long alternative name from link show JSON"
+ 
+       ip link property del $DUMMY_DEV altname $SHORT_NAME
+-      check_err $? "Failed to add short alternative name"
++      check_err $? "Failed to delete short alternative name"
+ 
+       ip -j -p link show $SHORT_NAME &>/dev/null
+       check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index 2f649b431456a..8fcb289278182 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -21,8 +21,8 @@ usage() {
+ 
+ cleanup()
+ {
+-      rm -f "$cin" "$cout"
+-      rm -f "$sin" "$sout"
++      rm -f "$cout" "$sout"
++      rm -f "$large" "$small"
+       rm -f "$capout"
+ 
+       local netns
