commit:     3f53c5485902a74ab9bfe982bb0f0327e4f4bb1a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 21 17:16:54 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 21 17:16:54 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3f53c548

Linux patch 4.14.121

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1120_linux-4.14.121.patch | 2085 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2089 insertions(+)

diff --git a/0000_README b/0000_README
index 10ec9c9..5301105 100644
--- a/0000_README
+++ b/0000_README
@@ -523,6 +523,10 @@ Patch:  1119_4.14.120.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.120
 
+Patch:  1120_4.14.121.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.121
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1120_linux-4.14.121.patch b/1120_linux-4.14.121.patch
new file mode 100644
index 0000000..eb1f4c2
--- /dev/null
+++ b/1120_linux-4.14.121.patch
@@ -0,0 +1,2085 @@
+diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
+index 534e9baa4e1d..5d4330be200f 100644
+--- a/Documentation/x86/mds.rst
++++ b/Documentation/x86/mds.rst
+@@ -142,45 +142,13 @@ Mitigation points
+    mds_user_clear.
+ 
+    The mitigation is invoked in prepare_exit_to_usermode() which covers
+-   most of the kernel to user space transitions. There are a few exceptions
+-   which are not invoking prepare_exit_to_usermode() on return to user
+-   space. These exceptions use the paranoid exit code.
++   all but one of the kernel to user space transitions.  The exception
++   is when we return from a Non Maskable Interrupt (NMI), which is
++   handled directly in do_nmi().
+ 
+-   - Non Maskable Interrupt (NMI):
+-
+-     Access to sensible data like keys, credentials in the NMI context is
+-     mostly theoretical: The CPU can do prefetching or execute a
+-     misspeculated code path and thereby fetching data which might end up
+-     leaking through a buffer.
+-
+-     But for mounting other attacks the kernel stack address of the task is
+-     already valuable information. So in full mitigation mode, the NMI is
+-     mitigated on the return from do_nmi() to provide almost complete
+-     coverage.
+-
+-   - Double fault (#DF):
+-
+-     A double fault is usually fatal, but the ESPFIX workaround, which can
+-     be triggered from user space through modify_ldt(2) is a recoverable
+-     double fault. #DF uses the paranoid exit path, so explicit mitigation
+-     in the double fault handler is required.
+-
+-   - Machine Check Exception (#MC):
+-
+-     Another corner case is a #MC which hits between the CPU buffer clear
+-     invocation and the actual return to user. As this still is in kernel
+-     space it takes the paranoid exit path which does not clear the CPU
+-     buffers. So the #MC handler repopulates the buffers to some
+-     extent. Machine checks are not reliably controllable and the window is
+-     extremly small so mitigation would just tick a checkbox that this
+-     theoretical corner case is covered. To keep the amount of special
+-     cases small, ignore #MC.
+-
+-   - Debug Exception (#DB):
+-
+-     This takes the paranoid exit path only when the INT1 breakpoint is in
+-     kernel space. #DB on a user space address takes the regular exit path,
+-     so no extra mitigation required.
++   (The reason that NMI is special is that prepare_exit_to_usermode() can
++    enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
++    enable IRQs with NMIs blocked.)
+ 
+ 
+ 2. C-State transition
+diff --git a/Makefile b/Makefile
+index 436d49582d3b..0e4063e1d0d7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 120
++SUBLEVEL = 121
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/boot/dts/exynos5260.dtsi 
b/arch/arm/boot/dts/exynos5260.dtsi
+index 5e88c9645975..026267055327 100644
+--- a/arch/arm/boot/dts/exynos5260.dtsi
++++ b/arch/arm/boot/dts/exynos5260.dtsi
+@@ -226,7 +226,7 @@
+                       wakeup-interrupt-controller {
+                               compatible = "samsung,exynos4210-wakeup-eint";
+                               interrupt-parent = <&gic>;
+-                              interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
++                              interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+                       };
+               };
+ 
+diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi 
b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+index c0b85981c6bf..5f36c1319bb2 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+@@ -23,7 +23,7 @@
+                       "Headphone Jack", "HPL",
+                       "Headphone Jack", "HPR",
+                       "Headphone Jack", "MICBIAS",
+-                      "IN1", "Headphone Jack",
++                      "IN12", "Headphone Jack",
+                       "Speakers", "SPKL",
+                       "Speakers", "SPKR";
+ 
+diff --git a/arch/arm/crypto/aes-neonbs-glue.c 
b/arch/arm/crypto/aes-neonbs-glue.c
+index 18768f330449..772a55526f24 100644
+--- a/arch/arm/crypto/aes-neonbs-glue.c
++++ b/arch/arm/crypto/aes-neonbs-glue.c
+@@ -280,6 +280,8 @@ static int __xts_crypt(struct skcipher_request *req,
+       int err;
+ 
+       err = skcipher_walk_virt(&walk, req, true);
++      if (err)
++              return err;
+ 
+       crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
+ 
+diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
+index e81a78b125d9..14e3d320dbad 100644
+--- a/arch/arm/mach-exynos/firmware.c
++++ b/arch/arm/mach-exynos/firmware.c
+@@ -205,6 +205,7 @@ void __init exynos_firmware_init(void)
+               return;
+ 
+       addr = of_get_address(nd, 0, NULL, NULL);
++      of_node_put(nd);
+       if (!addr) {
+               pr_err("%s: No address specified.\n", __func__);
+               return;
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index eafa26d9f692..9be92073f847 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -649,8 +649,10 @@ void __init exynos_pm_init(void)
+ 
+       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
+               pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
++              of_node_put(np);
+               return;
+       }
++      of_node_put(np);
+ 
+       pm_data = (const struct exynos_pm_data *) match->data;
+ 
+diff --git a/arch/arm64/crypto/aes-neonbs-glue.c 
b/arch/arm64/crypto/aes-neonbs-glue.c
+index c55d68ccb89f..52975817fdb6 100644
+--- a/arch/arm64/crypto/aes-neonbs-glue.c
++++ b/arch/arm64/crypto/aes-neonbs-glue.c
+@@ -307,6 +307,8 @@ static int __xts_crypt(struct skcipher_request *req,
+       int err;
+ 
+       err = skcipher_walk_virt(&walk, req, true);
++      if (err)
++              return err;
+ 
+       kernel_neon_begin();
+ 
+diff --git a/arch/arm64/include/asm/processor.h 
b/arch/arm64/include/asm/processor.h
+index fda6f5812281..91bb97d8bdbf 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -49,7 +49,15 @@
+  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+  */
+ #ifdef CONFIG_COMPAT
++#ifdef CONFIG_ARM64_64K_PAGES
++/*
++ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
++ * by the compat vectors page.
++ */
+ #define TASK_SIZE_32          UL(0x100000000)
++#else
++#define TASK_SIZE_32          (UL(0x100000000) - PAGE_SIZE)
++#endif /* CONFIG_ARM64_64K_PAGES */
+ #define TASK_SIZE             (test_thread_flag(TIF_32BIT) ? \
+                               TASK_SIZE_32 : TASK_SIZE_64)
+ #define TASK_SIZE_OF(tsk)     (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+diff --git a/arch/arm64/kernel/debug-monitors.c 
b/arch/arm64/kernel/debug-monitors.c
+index c7ef99904934..edb63bf2ac1c 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -133,6 +133,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
+  */
+ static int clear_os_lock(unsigned int cpu)
+ {
++      write_sysreg(0, osdlr_el1);
+       write_sysreg(0, oslar_el1);
+       isb();
+       return 0;
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 10c835f13f62..034a3a2a38ee 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -64,24 +64,25 @@ ENTRY(cpu_do_suspend)
+       mrs     x2, tpidr_el0
+       mrs     x3, tpidrro_el0
+       mrs     x4, contextidr_el1
+-      mrs     x5, cpacr_el1
+-      mrs     x6, tcr_el1
+-      mrs     x7, vbar_el1
+-      mrs     x8, mdscr_el1
+-      mrs     x9, oslsr_el1
+-      mrs     x10, sctlr_el1
++      mrs     x5, osdlr_el1
++      mrs     x6, cpacr_el1
++      mrs     x7, tcr_el1
++      mrs     x8, vbar_el1
++      mrs     x9, mdscr_el1
++      mrs     x10, oslsr_el1
++      mrs     x11, sctlr_el1
+ alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+-      mrs     x11, tpidr_el1
++      mrs     x12, tpidr_el1
+ alternative_else
+-      mrs     x11, tpidr_el2
++      mrs     x12, tpidr_el2
+ alternative_endif
+-      mrs     x12, sp_el0
++      mrs     x13, sp_el0
+       stp     x2, x3, [x0]
+-      stp     x4, xzr, [x0, #16]
+-      stp     x5, x6, [x0, #32]
+-      stp     x7, x8, [x0, #48]
+-      stp     x9, x10, [x0, #64]
+-      stp     x11, x12, [x0, #80]
++      stp     x4, x5, [x0, #16]
++      stp     x6, x7, [x0, #32]
++      stp     x8, x9, [x0, #48]
++      stp     x10, x11, [x0, #64]
++      stp     x12, x13, [x0, #80]
+       ret
+ ENDPROC(cpu_do_suspend)
+ 
+@@ -104,8 +105,8 @@ ENTRY(cpu_do_resume)
+       msr     cpacr_el1, x6
+ 
+       /* Don't change t0sz here, mask those bits when restoring */
+-      mrs     x5, tcr_el1
+-      bfi     x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
++      mrs     x7, tcr_el1
++      bfi     x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+ 
+       msr     tcr_el1, x8
+       msr     vbar_el1, x9
+@@ -129,6 +130,7 @@ alternative_endif
+       /*
+        * Restore oslsr_el1 by writing oslar_el1
+        */
++      msr     osdlr_el1, x5
+       ubfx    x11, x11, #1, #1
+       msr     oslar_el1, x11
+       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index 783de51a6c4e..6c881659ee8a 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -100,12 +100,6 @@
+ #define A64_STXR(sf, Rt, Rn, Rs) \
+       A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
+ 
+-/* Prefetch */
+-#define A64_PRFM(Rn, type, target, policy) \
+-      aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
+-                                AARCH64_INSN_PRFM_TARGET_##target, \
+-                                AARCH64_INSN_PRFM_POLICY_##policy)
+-
+ /* Add/subtract (immediate) */
+ #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
+       aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index be155f70f108..6110fe344368 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -712,7 +712,6 @@ emit_cond_jmp:
+       case BPF_STX | BPF_XADD | BPF_DW:
+               emit_a64_mov_i(1, tmp, off, ctx);
+               emit(A64_ADD(1, tmp, tmp, dst), ctx);
+-              emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
+               emit(A64_LDXR(isdw, tmp2, tmp), ctx);
+               emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+               emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
+diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c 
b/arch/x86/crypto/crct10dif-pclmul_glue.c
+index cd4df9322501..7bbfe7d35da7 100644
+--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
+@@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
+       return 0;
+ }
+ 
+-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
+-                      u8 *out)
++static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 
*out)
+ {
+       if (irq_fpu_usable()) {
+               kernel_fpu_begin();
+-              *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
++              *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
+               kernel_fpu_end();
+       } else
+-              *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
++              *(__u16 *)out = crc_t10dif_generic(crc, data, len);
+       return 0;
+ }
+ 
+@@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 
*data,
+ {
+       struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+ 
+-      return __chksum_finup(&ctx->crc, data, len, out);
++      return __chksum_finup(ctx->crc, data, len, out);
+ }
+ 
+ static int chksum_digest(struct shash_desc *desc, const u8 *data,
+                        unsigned int length, u8 *out)
+ {
+-      struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+-
+-      return __chksum_finup(&ctx->crc, data, length, out);
++      return __chksum_finup(0, data, length, out);
+ }
+ 
+ static struct shash_alg alg = {
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 60c4c342316c..49adabd94f88 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -234,6 +234,7 @@ ENTRY(__switch_to_asm)
+       pushl   %ebx
+       pushl   %edi
+       pushl   %esi
++      pushfl
+ 
+       /* switch stack */
+       movl    %esp, TASK_threadsp(%eax)
+@@ -256,6 +257,7 @@ ENTRY(__switch_to_asm)
+ #endif
+ 
+       /* restore callee-saved registers */
++      popfl
+       popl    %esi
+       popl    %edi
+       popl    %ebx
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 164cd7529f0b..09896f1c114f 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -342,6 +342,7 @@ ENTRY(__switch_to_asm)
+       pushq   %r13
+       pushq   %r14
+       pushq   %r15
++      pushfq
+ 
+       /* switch stack */
+       movq    %rsp, TASK_threadsp(%rdi)
+@@ -364,6 +365,7 @@ ENTRY(__switch_to_asm)
+ #endif
+ 
+       /* restore callee-saved registers */
++      popfq
+       popq    %r15
+       popq    %r14
+       popq    %r13
+diff --git a/arch/x86/include/asm/switch_to.h 
b/arch/x86/include/asm/switch_to.h
+index 12ef2b49d11b..6a7e83092811 100644
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -41,6 +41,7 @@ asmlinkage void ret_from_fork(void);
+  * order of the fields must match the code in __switch_to_asm().
+  */
+ struct inactive_task_frame {
++      unsigned long flags;
+ #ifdef CONFIG_X86_64
+       unsigned long r15;
+       unsigned long r14;
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index c2df91eab573..df6bb5f8ab2a 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -132,6 +132,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned 
long sp,
+       struct task_struct *tsk;
+       int err;
+ 
++      /*
++       * For a new task use the RESET flags value since there is no before.
++       * All the status flags are zero; DF and all the system flags must also
++       * be 0, specifically IF must be 0 because we context switch to the new
++       * task with interrupts disabled.
++       */
++      frame->flags = X86_EFLAGS_FIXED;
+       frame->bp = 0;
+       frame->ret_addr = (unsigned long) ret_from_fork;
+       p->thread.sp = (unsigned long) fork_frame;
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index ec63d6be5e02..d1dfd1397b46 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -278,6 +278,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned 
long sp,
+       childregs = task_pt_regs(p);
+       fork_frame = container_of(childregs, struct fork_frame, regs);
+       frame = &fork_frame->frame;
++
++      /*
++       * For a new task use the RESET flags value since there is no before.
++       * All the status flags are zero; DF and all the system flags must also
++       * be 0, specifically IF must be 0 because we context switch to the new
++       * task with interrupts disabled.
++       */
++      frame->flags = X86_EFLAGS_FIXED;
+       frame->bp = 0;
+       frame->ret_addr = (unsigned long) ret_from_fork;
+       p->thread.sp = (unsigned long) fork_frame;
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index aa0022a3faf5..ed8d78fd4f8c 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -58,7 +58,6 @@
+ #include <asm/alternative.h>
+ #include <asm/fpu/xstate.h>
+ #include <asm/trace/mpx.h>
+-#include <asm/nospec-branch.h>
+ #include <asm/mpx.h>
+ #include <asm/vm86.h>
+ 
+@@ -386,13 +385,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, 
long error_code)
+               regs->ip = (unsigned long)general_protection;
+               regs->sp = (unsigned long)&gpregs->orig_ax;
+ 
+-              /*
+-               * This situation can be triggered by userspace via
+-               * modify_ldt(2) and the return does not take the regular
+-               * user space exit, so a CPU buffer clear is required when
+-               * MDS mitigation is enabled.
+-               */
+-              mds_user_clear_cpu_buffers();
+               return;
+       }
+ #endif
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5f85f17ffb75..40b1e7ec2399 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1107,31 +1107,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, 
unsigned index, u64 *data)
+       return 0;
+ }
+ 
+-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
++static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+-      if (efer & efer_reserved_bits)
+-              return false;
+-
+       if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
+-                      return false;
++              return false;
+ 
+       if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+-                      return false;
++              return false;
+ 
+       return true;
++
++}
++bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++      if (efer & efer_reserved_bits)
++              return false;
++
++      return __kvm_valid_efer(vcpu, efer);
+ }
+ EXPORT_SYMBOL_GPL(kvm_valid_efer);
+ 
+-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
++static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+       u64 old_efer = vcpu->arch.efer;
++      u64 efer = msr_info->data;
+ 
+-      if (!kvm_valid_efer(vcpu, efer))
+-              return 1;
++      if (efer & efer_reserved_bits)
++              return false;
+ 
+-      if (is_paging(vcpu)
+-          && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+-              return 1;
++      if (!msr_info->host_initiated) {
++              if (!__kvm_valid_efer(vcpu, efer))
++                      return 1;
++
++              if (is_paging(vcpu) &&
++                  (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
++                      return 1;
++      }
+ 
+       efer &= ~EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
+@@ -2240,7 +2251,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
+               vcpu->arch.arch_capabilities = data;
+               break;
+       case MSR_EFER:
+-              return set_efer(vcpu, data);
++              return set_efer(vcpu, msr_info);
+       case MSR_K7_HWCR:
+               data &= ~(u64)0x40;     /* ignore flush filter disable */
+               data &= ~(u64)0x100;    /* ignore ignne emulation enable */
+diff --git a/crypto/ccm.c b/crypto/ccm.c
+index 0a083342ec8c..8104c564dd31 100644
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -455,7 +455,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
+ 
+ static int crypto_ccm_create_common(struct crypto_template *tmpl,
+                                   struct rtattr **tb,
+-                                  const char *full_name,
+                                   const char *ctr_name,
+                                   const char *mac_name)
+ {
+@@ -483,7 +482,8 @@ static int crypto_ccm_create_common(struct crypto_template 
*tmpl,
+ 
+       mac = __crypto_hash_alg_common(mac_alg);
+       err = -EINVAL;
+-      if (mac->digestsize != 16)
++      if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
++          mac->digestsize != 16)
+               goto out_put_mac;
+ 
+       inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
+@@ -506,23 +506,27 @@ static int crypto_ccm_create_common(struct 
crypto_template *tmpl,
+ 
+       ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
+ 
+-      /* Not a stream cipher? */
++      /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
+       err = -EINVAL;
+-      if (ctr->base.cra_blocksize != 1)
++      if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
++          crypto_skcipher_alg_ivsize(ctr) != 16 ||
++          ctr->base.cra_blocksize != 1)
+               goto err_drop_ctr;
+ 
+-      /* We want the real thing! */
+-      if (crypto_skcipher_alg_ivsize(ctr) != 16)
++      /* ctr and cbcmac must use the same underlying block cipher. */
++      if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
+               goto err_drop_ctr;
+ 
+       err = -ENAMETOOLONG;
++      if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
++                   "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
++              goto err_drop_ctr;
++
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "ccm_base(%s,%s)", ctr->base.cra_driver_name,
+                    mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+               goto err_drop_ctr;
+ 
+-      memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+-
+       inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = (mac->base.cra_priority +
+                                      ctr->base.cra_priority) / 2;
+@@ -564,7 +568,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, 
struct rtattr **tb)
+       const char *cipher_name;
+       char ctr_name[CRYPTO_MAX_ALG_NAME];
+       char mac_name[CRYPTO_MAX_ALG_NAME];
+-      char full_name[CRYPTO_MAX_ALG_NAME];
+ 
+       cipher_name = crypto_attr_alg_name(tb[1]);
+       if (IS_ERR(cipher_name))
+@@ -578,12 +581,7 @@ static int crypto_ccm_create(struct crypto_template 
*tmpl, struct rtattr **tb)
+                    cipher_name) >= CRYPTO_MAX_ALG_NAME)
+               return -ENAMETOOLONG;
+ 
+-      if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
+-          CRYPTO_MAX_ALG_NAME)
+-              return -ENAMETOOLONG;
+-
+-      return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+-                                      mac_name);
++      return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
+ }
+ 
+ static struct crypto_template crypto_ccm_tmpl = {
+@@ -596,23 +594,17 @@ static int crypto_ccm_base_create(struct crypto_template 
*tmpl,
+                                 struct rtattr **tb)
+ {
+       const char *ctr_name;
+-      const char *cipher_name;
+-      char full_name[CRYPTO_MAX_ALG_NAME];
++      const char *mac_name;
+ 
+       ctr_name = crypto_attr_alg_name(tb[1]);
+       if (IS_ERR(ctr_name))
+               return PTR_ERR(ctr_name);
+ 
+-      cipher_name = crypto_attr_alg_name(tb[2]);
+-      if (IS_ERR(cipher_name))
+-              return PTR_ERR(cipher_name);
+-
+-      if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
+-                   ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
+-              return -ENAMETOOLONG;
++      mac_name = crypto_attr_alg_name(tb[2]);
++      if (IS_ERR(mac_name))
++              return PTR_ERR(mac_name);
+ 
+-      return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+-                                      cipher_name);
++      return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
+ }
+ 
+ static struct crypto_template crypto_ccm_base_tmpl = {
+diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
+index 600afa99941f..4d6f51bcdfab 100644
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, 
struct rtattr **tb,
+ 
+       err = -ENAMETOOLONG;
+       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+-                   "%s(%s,%s)", name, chacha_name,
+-                   poly_name) >= CRYPTO_MAX_ALG_NAME)
++                   "%s(%s,%s)", name, chacha->base.cra_name,
++                   poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
+               goto out_drop_chacha;
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "%s(%s,%s)", name, chacha->base.cra_driver_name,
+diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
+index 8e94e29dc6fc..d08048ae5552 100644
+--- a/crypto/crct10dif_generic.c
++++ b/crypto/crct10dif_generic.c
+@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
+       return 0;
+ }
+ 
+-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
+-                      u8 *out)
++static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 
*out)
+ {
+-      *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
++      *(__u16 *)out = crc_t10dif_generic(crc, data, len);
+       return 0;
+ }
+ 
+@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 
*data,
+ {
+       struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+ 
+-      return __chksum_finup(&ctx->crc, data, len, out);
++      return __chksum_finup(ctx->crc, data, len, out);
+ }
+ 
+ static int chksum_digest(struct shash_desc *desc, const u8 *data,
+                        unsigned int length, u8 *out)
+ {
+-      struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+-
+-      return __chksum_finup(&ctx->crc, data, length, out);
++      return __chksum_finup(0, data, length, out);
+ }
+ 
+ static struct shash_alg alg = {
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 3841b5eafa7e..45b34a1144b8 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -616,7 +616,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
+ 
+ static int crypto_gcm_create_common(struct crypto_template *tmpl,
+                                   struct rtattr **tb,
+-                                  const char *full_name,
+                                   const char *ctr_name,
+                                   const char *ghash_name)
+ {
+@@ -657,7 +656,8 @@ static int crypto_gcm_create_common(struct crypto_template 
*tmpl,
+               goto err_free_inst;
+ 
+       err = -EINVAL;
+-      if (ghash->digestsize != 16)
++      if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
++          ghash->digestsize != 16)
+               goto err_drop_ghash;
+ 
+       crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
+@@ -669,24 +669,24 @@ static int crypto_gcm_create_common(struct 
crypto_template *tmpl,
+ 
+       ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
+ 
+-      /* We only support 16-byte blocks. */
++      /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
+       err = -EINVAL;
+-      if (crypto_skcipher_alg_ivsize(ctr) != 16)
++      if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
++          crypto_skcipher_alg_ivsize(ctr) != 16 ||
++          ctr->base.cra_blocksize != 1)
+               goto out_put_ctr;
+ 
+-      /* Not a stream cipher? */
+-      if (ctr->base.cra_blocksize != 1)
++      err = -ENAMETOOLONG;
++      if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
++                   "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+               goto out_put_ctr;
+ 
+-      err = -ENAMETOOLONG;
+       if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                    "gcm_base(%s,%s)", ctr->base.cra_driver_name,
+                    ghash_alg->cra_driver_name) >=
+           CRYPTO_MAX_ALG_NAME)
+               goto out_put_ctr;
+ 
+-      memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+-
+       inst->alg.base.cra_flags = (ghash->base.cra_flags |
+                                   ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
+       inst->alg.base.cra_priority = (ghash->base.cra_priority +
+@@ -728,7 +728,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, 
struct rtattr **tb)
+ {
+       const char *cipher_name;
+       char ctr_name[CRYPTO_MAX_ALG_NAME];
+-      char full_name[CRYPTO_MAX_ALG_NAME];
+ 
+       cipher_name = crypto_attr_alg_name(tb[1]);
+       if (IS_ERR(cipher_name))
+@@ -738,12 +737,7 @@ static int crypto_gcm_create(struct crypto_template 
*tmpl, struct rtattr **tb)
+           CRYPTO_MAX_ALG_NAME)
+               return -ENAMETOOLONG;
+ 
+-      if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
+-          CRYPTO_MAX_ALG_NAME)
+-              return -ENAMETOOLONG;
+-
+-      return crypto_gcm_create_common(tmpl, tb, full_name,
+-                                      ctr_name, "ghash");
++      return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
+ }
+ 
+ static struct crypto_template crypto_gcm_tmpl = {
+@@ -757,7 +751,6 @@ static int crypto_gcm_base_create(struct crypto_template 
*tmpl,
+ {
+       const char *ctr_name;
+       const char *ghash_name;
+-      char full_name[CRYPTO_MAX_ALG_NAME];
+ 
+       ctr_name = crypto_attr_alg_name(tb[1]);
+       if (IS_ERR(ctr_name))
+@@ -767,12 +760,7 @@ static int crypto_gcm_base_create(struct crypto_template 
*tmpl,
+       if (IS_ERR(ghash_name))
+               return PTR_ERR(ghash_name);
+ 
+-      if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
+-                   ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
+-              return -ENAMETOOLONG;
+-
+-      return crypto_gcm_create_common(tmpl, tb, full_name,
+-                                      ctr_name, ghash_name);
++      return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
+ }
+ 
+ static struct crypto_template crypto_gcm_base_tmpl = {
+diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+index d7da0eea5622..319d9962552e 100644
+--- a/crypto/salsa20_generic.c
++++ b/crypto/salsa20_generic.c
+@@ -186,7 +186,7 @@ static int encrypt(struct blkcipher_desc *desc,
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 64);
+ 
+-      salsa20_ivsetup(ctx, walk.iv);
++      salsa20_ivsetup(ctx, desc->info);
+ 
+       while (walk.nbytes >= 64) {
+               salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+diff --git a/crypto/skcipher.c b/crypto/skcipher.c
+index e319421a32e7..c5501404f145 100644
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -131,8 +131,13 @@ unmap_src:
+               memcpy(walk->dst.virt.addr, walk->page, n);
+               skcipher_unmap_dst(walk);
+       } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+-              if (WARN_ON(err)) {
+-                      /* unexpected case; didn't process all bytes */
++              if (err) {
++                      /*
++                       * Didn't process all bytes.  Either the algorithm is
++                       * broken, or this was the last step and it turned out
++                       * the message wasn't evenly divisible into blocks but
++                       * the algorithm requires it.
++                       */
+                       err = -EINVAL;
+                       goto finish;
+               }
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index ab701f668ebc..941bffd9b49c 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -703,12 +703,16 @@ static void msg_done_handler(struct ssif_info 
*ssif_info, int result,
+                       /* End of read */
+                       len = ssif_info->multi_len;
+                       data = ssif_info->data;
+-              } else if (blocknum != ssif_info->multi_pos) {
++              } else if (blocknum + 1 != ssif_info->multi_pos) {
+                       /*
+                        * Out of sequence block, just abort.  Block
+                        * numbers start at zero for the second block,
+                        * but multi_pos starts at one, so the +1.
+                        */
++                      if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
++                              dev_dbg(&ssif_info->client->dev,
++                                      "Received message out of sequence, 
expected %u, got %u\n",
++                                      ssif_info->multi_pos - 1, blocknum);
+                       result = -EIO;
+               } else {
+                       ssif_inc_stat(ssif_info, received_message_parts);
+diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c 
b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+index 23305f22072f..204e4ad62c38 100644
+--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
++++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+@@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
+       u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+               dev->sg_src->offset + dev->sg_src->length - ivsize;
+ 
+-      /* store the iv that need to be updated in chain mode */
+-      if (ctx->mode & RK_CRYPTO_DEC)
++      /* Store the iv that need to be updated in chain mode.
++       * And update the IV buffer to contain the next IV for decryption mode.
++       */
++      if (ctx->mode & RK_CRYPTO_DEC) {
+               memcpy(ctx->iv, src_last_blk, ivsize);
++              sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
++                                 ivsize, dev->total - ivsize);
++      }
+ 
+       err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
+       if (!err)
+@@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
+       struct ablkcipher_request *req =
+               ablkcipher_request_cast(dev->async_req);
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++      struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+       u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+ 
+-      if (ivsize == DES_BLOCK_SIZE)
+-              memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+-                            ivsize);
+-      else if (ivsize == AES_BLOCK_SIZE)
+-              memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
++      /* Update the IV buffer to contain the next IV for encryption mode. */
++      if (!(ctx->mode & RK_CRYPTO_DEC)) {
++              if (dev->aligned) {
++                      memcpy(req->info, sg_virt(dev->sg_dst) +
++                              dev->sg_dst->length - ivsize, ivsize);
++              } else {
++                      memcpy(req->info, dev->addr_vir +
++                              dev->count - ivsize, ivsize);
++              }
++      }
+ }
+ 
+ static void rk_update_iv(struct rk_crypto_info *dev)
+diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
+index 0b4a293b8a1e..d9281a28818d 100644
+--- a/drivers/crypto/vmx/aesp8-ppc.pl
++++ b/drivers/crypto/vmx/aesp8-ppc.pl
+@@ -1815,7 +1815,7 @@ Lctr32_enc8x_three:
+       stvx_u          $out1,$x10,$out
+       stvx_u          $out2,$x20,$out
+       addi            $out,$out,0x30
+-      b               Lcbc_dec8x_done
++      b               Lctr32_enc8x_done
+ 
+ .align        5
+ Lctr32_enc8x_two:
+@@ -1827,7 +1827,7 @@ Lctr32_enc8x_two:
+       stvx_u          $out0,$x00,$out
+       stvx_u          $out1,$x10,$out
+       addi            $out,$out,0x20
+-      b               Lcbc_dec8x_done
++      b               Lctr32_enc8x_done
+ 
+ .align        5
+ Lctr32_enc8x_one:
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 03cc0722ae48..c02394c3181f 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -512,11 +512,11 @@ static void journal_reclaim(struct cache_set *c)
+                                 ca->sb.nr_this_dev);
+       }
+ 
+-      bkey_init(k);
+-      SET_KEY_PTRS(k, n);
+-
+-      if (n)
++      if (n) {
++              bkey_init(k);
++              SET_KEY_PTRS(k, n);
+               c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
++      }
+ out:
+       if (!journal_full(&c->journal))
+               __closure_wake_up(&c->journal.wait);
+@@ -641,6 +641,9 @@ static void journal_write_unlocked(struct closure *cl)
+               ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
+       }
+ 
++      /* If KEY_PTRS(k) == 0, this jset gets lost in air */
++      BUG_ON(i == 0);
++
+       atomic_dec_bug(&fifo_back(&c->journal.pin));
+       bch_journal_next(&c->journal);
+       journal_reclaim(c);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 9e875aba41b9..175bab2d7206 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1357,6 +1357,7 @@ static void cache_set_free(struct closure *cl)
+       bch_btree_cache_free(c);
+       bch_journal_free(c);
+ 
++      mutex_lock(&bch_register_lock);
+       for_each_cache(ca, c, i)
+               if (ca) {
+                       ca->set = NULL;
+@@ -1379,7 +1380,6 @@ static void cache_set_free(struct closure *cl)
+               mempool_destroy(c->search);
+       kfree(c->devices);
+ 
+-      mutex_lock(&bch_register_lock);
+       list_del(&c->list);
+       mutex_unlock(&bch_register_lock);
+ 
+diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
+index 7802ac3ba934..fa2983af4d2c 100644
+--- a/drivers/mtd/spi-nor/intel-spi.c
++++ b/drivers/mtd/spi-nor/intel-spi.c
+@@ -503,6 +503,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t 
from, size_t len,
+       while (len > 0) {
+               block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ 
++              /* Read cannot cross 4K boundary */
++              block_size = min_t(loff_t, from + block_size,
++                                 round_up(from + 1, SZ_4K)) - from;
++
+               writel(from, ispi->base + FADDR);
+ 
+               val = readl(ispi->base + HSFSTS_CTL);
+@@ -553,6 +557,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, 
loff_t to, size_t len,
+       while (len > 0) {
+               block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ 
++              /* Write cannot cross 4K boundary */
++              block_size = min_t(loff_t, to + block_size,
++                                 round_up(to + 1, SZ_4K)) - to;
++
+               writel(to, ispi->base + FADDR);
+ 
+               val = readl(ispi->base + HSFSTS_CTL);
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index 53d1c08cef4d..f591de23f3d3 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -1513,6 +1513,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device 
*hbus)
+       }
+ }
+ 
++/*
++ * Remove entries in sysfs pci slot directory.
++ */
++static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
++{
++      struct hv_pci_dev *hpdev;
++
++      list_for_each_entry(hpdev, &hbus->children, list_entry) {
++              if (!hpdev->pci_slot)
++                      continue;
++              pci_destroy_slot(hpdev->pci_slot);
++              hpdev->pci_slot = NULL;
++      }
++}
++
+ /**
+  * create_root_hv_pci_bus() - Expose a new root PCI bus
+  * @hbus:     Root PCI bus, as understood by this driver
+@@ -1809,6 +1824,10 @@ static void pci_devices_present_work(struct work_struct 
*work)
+               hpdev = list_first_entry(&removed, struct hv_pci_dev,
+                                        list_entry);
+               list_del(&hpdev->list_entry);
++
++              if (hpdev->pci_slot)
++                      pci_destroy_slot(hpdev->pci_slot);
++
+               put_pcichild(hpdev, hv_pcidev_ref_initial);
+       }
+ 
+@@ -1941,6 +1960,7 @@ static void hv_eject_device_work(struct work_struct 
*work)
+                        VM_PKT_DATA_INBAND, 0);
+ 
+       put_pcichild(hpdev, hv_pcidev_ref_childlist);
++      put_pcichild(hpdev, hv_pcidev_ref_initial);
+       put_pcichild(hpdev, hv_pcidev_ref_pnp);
+       put_hvpcibus(hpdev->hbus);
+ }
+@@ -2718,6 +2738,7 @@ static int hv_pci_remove(struct hv_device *hdev)
+               pci_lock_rescan_remove();
+               pci_stop_root_bus(hbus->pci_bus);
+               pci_remove_root_bus(hbus->pci_bus);
++              hv_pci_remove_slots(hbus);
+               pci_unlock_rescan_remove();
+               hbus->state = hv_pcibus_removed;
+       }
+diff --git a/drivers/power/supply/axp288_charger.c 
b/drivers/power/supply/axp288_charger.c
+index 4d016fbc3527..b8f7da57c78a 100644
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -881,6 +881,10 @@ static int axp288_charger_probe(struct platform_device 
*pdev)
+       /* Register charger interrupts */
+       for (i = 0; i < CHRG_INTR_END; i++) {
+               pirq = platform_get_irq(info->pdev, i);
++              if (pirq < 0) {
++                      dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
++                      return pirq;
++              }
+               info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
+               if (info->irq[i] < 0) {
+                       dev_warn(&info->pdev->dev,
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index f4166263bb3a..7506bbcf8259 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -122,6 +122,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
+ static struct input_handler kbd_handler;
+ static DEFINE_SPINLOCK(kbd_event_lock);
+ static DEFINE_SPINLOCK(led_lock);
++static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf'  and friends */
+ static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];        /* keyboard key 
bitmap */
+ static unsigned char shift_down[NR_SHIFT];            /* shift state 
counters.. */
+ static bool dead_key_next;
+@@ -1959,11 +1960,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user 
*user_kdgkb, int perm)
+       char *p;
+       u_char *q;
+       u_char __user *up;
+-      int sz;
++      int sz, fnw_sz;
+       int delta;
+       char *first_free, *fj, *fnw;
+       int i, j, k;
+       int ret;
++      unsigned long flags;
+ 
+       if (!capable(CAP_SYS_TTY_CONFIG))
+               perm = 0;
+@@ -2006,7 +2008,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user 
*user_kdgkb, int perm)
+                       goto reterr;
+               }
+ 
++              fnw = NULL;
++              fnw_sz = 0;
++              /* race aginst other writers */
++              again:
++              spin_lock_irqsave(&func_buf_lock, flags);
+               q = func_table[i];
++
++              /* fj pointer to next entry after 'q' */
+               first_free = funcbufptr + (funcbufsize - funcbufleft);
+               for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
+                       ;
+@@ -2014,10 +2023,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user 
*user_kdgkb, int perm)
+                       fj = func_table[j];
+               else
+                       fj = first_free;
+-
++              /* buffer usage increase by new entry */
+               delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
++
+               if (delta <= funcbufleft) {     /* it fits in current buf */
+                   if (j < MAX_NR_FUNC) {
++                      /* make enough space for new entry at 'fj' */
+                       memmove(fj + delta, fj, first_free - fj);
+                       for (k = j; k < MAX_NR_FUNC; k++)
+                           if (func_table[k])
+@@ -2030,20 +2041,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user 
*user_kdgkb, int perm)
+                   sz = 256;
+                   while (sz < funcbufsize - funcbufleft + delta)
+                     sz <<= 1;
+-                  fnw = kmalloc(sz, GFP_KERNEL);
+-                  if(!fnw) {
+-                    ret = -ENOMEM;
+-                    goto reterr;
++                  if (fnw_sz != sz) {
++                    spin_unlock_irqrestore(&func_buf_lock, flags);
++                    kfree(fnw);
++                    fnw = kmalloc(sz, GFP_KERNEL);
++                    fnw_sz = sz;
++                    if (!fnw) {
++                      ret = -ENOMEM;
++                      goto reterr;
++                    }
++                    goto again;
+                   }
+ 
+                   if (!q)
+                     func_table[i] = fj;
++                  /* copy data before insertion point to new location */
+                   if (fj > funcbufptr)
+                       memmove(fnw, funcbufptr, fj - funcbufptr);
+                   for (k = 0; k < j; k++)
+                     if (func_table[k])
+                       func_table[k] = fnw + (func_table[k] - funcbufptr);
+ 
++                  /* copy data after insertion point to new location */
+                   if (first_free > fj) {
+                       memmove(fnw + (fj - funcbufptr) + delta, fj, first_free 
- fj);
+                       for (k = j; k < MAX_NR_FUNC; k++)
+@@ -2056,7 +2075,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user 
*user_kdgkb, int perm)
+                   funcbufleft = funcbufleft - delta + sz - funcbufsize;
+                   funcbufsize = sz;
+               }
++              /* finally insert item itself */
+               strcpy(func_table[i], kbs->kb_string);
++              spin_unlock_irqrestore(&func_buf_lock, flags);
+               break;
+       }
+       ret = 0;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 6ff921cf9a9e..06761fcedeff 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3840,8 +3840,6 @@ void do_blank_screen(int entering_gfx)
+               return;
+       }
+ 
+-      if (blank_state != blank_normal_wait)
+-              return;
+       blank_state = blank_off;
+ 
+       /* don't blank graphics */
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 0531cb9a3ba9..196503d8c993 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1452,8 +1452,8 @@ int btrfs_find_all_roots(struct btrfs_trans_handle 
*trans,
+  * callers (such as fiemap) which want to know whether the extent is
+  * shared but do not need a ref count.
+  *
+- * This attempts to allocate a transaction in order to account for
+- * delayed refs, but continues on even when the alloc fails.
++ * This attempts to attach to the running transaction in order to account for
++ * delayed refs, but continues on even when no running transaction exists.
+  *
+  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
+  */
+@@ -1476,13 +1476,16 @@ int btrfs_check_shared(struct btrfs_root *root, u64 
inum, u64 bytenr)
+       tmp = ulist_alloc(GFP_NOFS);
+       roots = ulist_alloc(GFP_NOFS);
+       if (!tmp || !roots) {
+-              ulist_free(tmp);
+-              ulist_free(roots);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto out;
+       }
+ 
+-      trans = btrfs_join_transaction(root);
++      trans = btrfs_attach_transaction(root);
+       if (IS_ERR(trans)) {
++              if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
++                      ret = PTR_ERR(trans);
++                      goto out;
++              }
+               trans = NULL;
+               down_read(&fs_info->commit_root_sem);
+       } else {
+@@ -1515,6 +1518,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 
inum, u64 bytenr)
+       } else {
+               up_read(&fs_info->commit_root_sem);
+       }
++out:
+       ulist_free(tmp);
+       ulist_free(roots);
+       return ret;
+@@ -1903,13 +1907,19 @@ int iterate_extent_inodes(struct btrfs_fs_info 
*fs_info,
+                       extent_item_objectid);
+ 
+       if (!search_commit_root) {
+-              trans = btrfs_join_transaction(fs_info->extent_root);
+-              if (IS_ERR(trans))
+-                      return PTR_ERR(trans);
++              trans = btrfs_attach_transaction(fs_info->extent_root);
++              if (IS_ERR(trans)) {
++                      if (PTR_ERR(trans) != -ENOENT &&
++                          PTR_ERR(trans) != -EROFS)
++                              return PTR_ERR(trans);
++                      trans = NULL;
++              }
++      }
++
++      if (trans)
+               btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+-      } else {
++      else
+               down_read(&fs_info->commit_root_sem);
+-      }
+ 
+       ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
+                                  tree_mod_seq_elem.seq, &refs,
+@@ -1941,7 +1951,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+ 
+       free_leaf_list(refs);
+ out:
+-      if (!search_commit_root) {
++      if (trans) {
+               btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+               btrfs_end_transaction(trans);
+       } else {
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 5592b7726241..01f44364c547 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -1047,6 +1047,7 @@ static int ext4_ext_split(handle_t *handle, struct inode 
*inode,
+       __le32 border;
+       ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
+       int err = 0;
++      size_t ext_size = 0;
+ 
+       /* make decision: where to split? */
+       /* FIXME: now decision is simplest: at current extent */
+@@ -1138,6 +1139,10 @@ static int ext4_ext_split(handle_t *handle, struct 
inode *inode,
+               le16_add_cpu(&neh->eh_entries, m);
+       }
+ 
++      /* zero out unused area in the extent block */
++      ext_size = sizeof(struct ext4_extent_header) +
++              sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
++      memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
+       ext4_extent_block_csum_set(inode, neh);
+       set_buffer_uptodate(bh);
+       unlock_buffer(bh);
+@@ -1217,6 +1222,11 @@ static int ext4_ext_split(handle_t *handle, struct 
inode *inode,
+                               sizeof(struct ext4_extent_idx) * m);
+                       le16_add_cpu(&neh->eh_entries, m);
+               }
++              /* zero out unused area in the extent block */
++              ext_size = sizeof(struct ext4_extent_header) +
++                 (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
++              memset(bh->b_data + ext_size, 0,
++                      inode->i_sb->s_blocksize - ext_size);
+               ext4_extent_block_csum_set(inode, neh);
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+@@ -1282,6 +1292,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, 
struct inode *inode,
+       ext4_fsblk_t newblock, goal = 0;
+       struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+       int err = 0;
++      size_t ext_size = 0;
+ 
+       /* Try to prepend new index to old one */
+       if (ext_depth(inode))
+@@ -1307,9 +1318,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, 
struct inode *inode,
+               goto out;
+       }
+ 
++      ext_size = sizeof(EXT4_I(inode)->i_data);
+       /* move top-level index/leaf into new block */
+-      memmove(bh->b_data, EXT4_I(inode)->i_data,
+-              sizeof(EXT4_I(inode)->i_data));
++      memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
++      /* zero out unused area in the extent block */
++      memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
+ 
+       /* set size of new block */
+       neh = ext_block_hdr(bh);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 1913c69498c1..44966b272216 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -262,6 +262,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter 
*from)
+       }
+ 
+       ret = __generic_file_write_iter(iocb, from);
++      /*
++       * Unaligned direct AIO must be the only IO in flight. Otherwise
++       * overlapping aligned IO after unaligned might result in data
++       * corruption.
++       */
++      if (ret == -EIOCBQUEUED && unaligned_aio)
++              ext4_unwritten_wait(inode);
+       inode_unlock(inode);
+ 
+       if (ret > 0)
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 5eb28dcaa0f0..d3ef946df585 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5818,7 +5818,7 @@ int ext4_expand_extra_isize(struct inode *inode,
+ 
+       ext4_write_lock_xattr(inode, &no_expand);
+ 
+-      BUFFER_TRACE(iloc.bh, "get_write_access");
++      BUFFER_TRACE(iloc->bh, "get_write_access");
+       error = ext4_journal_get_write_access(handle, iloc->bh);
+       if (error) {
+               brelse(iloc->bh);
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 3dbf4e414706..d2efc0cb8f31 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -918,7 +918,7 @@ group_add_out:
+               if (err == 0)
+                       err = err2;
+               mnt_drop_write_file(filp);
+-              if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
++              if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
+                   ext4_has_group_desc_csum(sb) &&
+                   test_opt(sb, INIT_INODE_TABLE))
+                       err = ext4_register_li_request(sb, o_group);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1792999eec91..3ba9a4ae4eac 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1555,7 +1555,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int 
block,
+               ex->fe_len += 1 << order;
+       }
+ 
+-      if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
++      if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
+               /* Should never happen! (but apparently sometimes does?!?) */
+               WARN_ON(1);
+               ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4e301b0cdfb5..070660cb5b91 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -870,12 +870,15 @@ static void dx_release(struct dx_frame *frames)
+ {
+       struct dx_root_info *info;
+       int i;
++      unsigned int indirect_levels;
+ 
+       if (frames[0].bh == NULL)
+               return;
+ 
+       info = &((struct dx_root *)frames[0].bh->b_data)->info;
+-      for (i = 0; i <= info->indirect_levels; i++) {
++      /* save local copy, "info" may be freed after brelse() */
++      indirect_levels = info->indirect_levels;
++      for (i = 0; i <= indirect_levels; i++) {
+               if (frames[i].bh == NULL)
+                       break;
+               brelse(frames[i].bh);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 333fba05e1a5..4f7cd78d0364 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -849,6 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode 
*inode,
+       err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
+       if (unlikely(err)) {
+               ext4_std_error(sb, err);
++              iloc.bh = NULL;
+               goto errout;
+       }
+       brelse(dind);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d0049064f62f..61d07608577e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3454,6 +3454,37 @@ int ext4_calculate_overhead(struct super_block *sb)
+       return 0;
+ }
+ 
++static void ext4_clamp_want_extra_isize(struct super_block *sb)
++{
++      struct ext4_sb_info *sbi = EXT4_SB(sb);
++      struct ext4_super_block *es = sbi->s_es;
++
++      /* determine the minimum size of new large inodes, if present */
++      if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
++          sbi->s_want_extra_isize == 0) {
++              sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
++                                                   EXT4_GOOD_OLD_INODE_SIZE;
++              if (ext4_has_feature_extra_isize(sb)) {
++                      if (sbi->s_want_extra_isize <
++                          le16_to_cpu(es->s_want_extra_isize))
++                              sbi->s_want_extra_isize =
++                                      le16_to_cpu(es->s_want_extra_isize);
++                      if (sbi->s_want_extra_isize <
++                          le16_to_cpu(es->s_min_extra_isize))
++                              sbi->s_want_extra_isize =
++                                      le16_to_cpu(es->s_min_extra_isize);
++              }
++      }
++      /* Check if enough inode space is available */
++      if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
++                                                      sbi->s_inode_size) {
++              sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
++                                                     EXT4_GOOD_OLD_INODE_SIZE;
++              ext4_msg(sb, KERN_INFO,
++                       "required extra inode space not available");
++      }
++}
++
+ static void ext4_set_resv_clusters(struct super_block *sb)
+ {
+       ext4_fsblk_t resv_clusters;
+@@ -4178,7 +4209,7 @@ static int ext4_fill_super(struct super_block *sb, void 
*data, int silent)
+                                "data=, fs mounted w/o journal");
+                       goto failed_mount_wq;
+               }
+-              sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
++              sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
+               clear_opt(sb, JOURNAL_CHECKSUM);
+               clear_opt(sb, DATA_FLAGS);
+               sbi->s_journal = NULL;
+@@ -4320,30 +4351,7 @@ no_journal:
+       if (ext4_setup_super(sb, es, sb_rdonly(sb)))
+               sb->s_flags |= MS_RDONLY;
+ 
+-      /* determine the minimum size of new large inodes, if present */
+-      if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
+-          sbi->s_want_extra_isize == 0) {
+-              sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+-                                                   EXT4_GOOD_OLD_INODE_SIZE;
+-              if (ext4_has_feature_extra_isize(sb)) {
+-                      if (sbi->s_want_extra_isize <
+-                          le16_to_cpu(es->s_want_extra_isize))
+-                              sbi->s_want_extra_isize =
+-                                      le16_to_cpu(es->s_want_extra_isize);
+-                      if (sbi->s_want_extra_isize <
+-                          le16_to_cpu(es->s_min_extra_isize))
+-                              sbi->s_want_extra_isize =
+-                                      le16_to_cpu(es->s_min_extra_isize);
+-              }
+-      }
+-      /* Check if enough inode space is available */
+-      if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+-                                                      sbi->s_inode_size) {
+-              sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+-                                                     EXT4_GOOD_OLD_INODE_SIZE;
+-              ext4_msg(sb, KERN_INFO, "required extra inode space not"
+-                       "available");
+-      }
++      ext4_clamp_want_extra_isize(sb);
+ 
+       ext4_set_resv_clusters(sb);
+ 
+@@ -5128,6 +5136,8 @@ static int ext4_remount(struct super_block *sb, int 
*flags, char *data)
+               goto restore_opts;
+       }
+ 
++      ext4_clamp_want_extra_isize(sb);
++
+       if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+           test_opt(sb, JOURNAL_CHECKSUM)) {
+               ext4_msg(sb, KERN_ERR, "changing journal_checksum "
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 6761e905cab0..f2fde3ac8698 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1698,7 +1698,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info 
*i,
+ 
+       /* No failures allowed past this point. */
+ 
+-      if (!s->not_found && here->e_value_size && here->e_value_offs) {
++      if (!s->not_found && here->e_value_size && !here->e_value_inum) {
+               /* Remove the old value. */
+               void *first_val = s->base + min_offs;
+               size_t offs = le16_to_cpu(here->e_value_offs);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 6a76616c9401..4d561ee08d05 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -530,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int 
new_wb_id)
+ 
+       isw->inode = inode;
+ 
+-      atomic_inc(&isw_nr_in_flight);
+-
+       /*
+        * In addition to synchronizing among switchers, I_WB_SWITCH tells
+        * the RCU protected stat update paths to grab the mapping's
+@@ -539,6 +537,9 @@ static void inode_switch_wbs(struct inode *inode, int 
new_wb_id)
+        * Let's continue after I_WB_SWITCH is guaranteed to be visible.
+        */
+       call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
++
++      atomic_inc(&isw_nr_in_flight);
++
+       goto out_unlock;
+ 
+ out_free:
+@@ -908,7 +909,11 @@ restart:
+ void cgroup_writeback_umount(void)
+ {
+       if (atomic_read(&isw_nr_in_flight)) {
+-              synchronize_rcu();
++              /*
++               * Use rcu_barrier() to wait for all pending callbacks to
++               * ensure that all in-flight wb switches are in the workqueue.
++               */
++              rcu_barrier();
+               flush_workqueue(isw_wq);
+       }
+ }
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 0c8f77db60e2..593f3e31fb21 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1353,6 +1353,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
+       journal_superblock_t *sb = journal->j_superblock;
+       int ret;
+ 
++      /* Buffer got discarded which means block device got invalidated */
++      if (!buffer_mapped(bh))
++              return -EIO;
++
+       trace_jbd2_write_superblock(journal, write_flags);
+       if (!(journal->j_flags & JBD2_BARRIER))
+               write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
+diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
+index 4bf8d5854b27..af2888d23de3 100644
+--- a/fs/ocfs2/export.c
++++ b/fs/ocfs2/export.c
+@@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
+       u64 blkno;
+       struct dentry *parent;
+       struct inode *dir = d_inode(child);
++      int set;
+ 
+       trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
+                              (unsigned long long)OCFS2_I(dir)->ip_blkno);
+ 
++      status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
++      if (status < 0) {
++              mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
++              parent = ERR_PTR(status);
++              goto bail;
++      }
++
+       status = ocfs2_inode_lock(dir, NULL, 0);
+       if (status < 0) {
+               if (status != -ENOENT)
+                       mlog_errno(status);
+               parent = ERR_PTR(status);
+-              goto bail;
++              goto unlock_nfs_sync;
+       }
+ 
+       status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
+@@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
+               goto bail_unlock;
+       }
+ 
++      status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
++      if (status < 0) {
++              if (status == -EINVAL) {
++                      status = -ESTALE;
++              } else
++                      mlog(ML_ERROR, "test inode bit failed %d\n", status);
++              parent = ERR_PTR(status);
++              goto bail_unlock;
++      }
++
++      trace_ocfs2_get_dentry_test_bit(status, set);
++      if (!set) {
++              status = -ESTALE;
++              parent = ERR_PTR(status);
++              goto bail_unlock;
++      }
++
+       parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
+ 
+ bail_unlock:
+       ocfs2_inode_unlock(dir, 0);
+ 
++unlock_nfs_sync:
++      ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
++
+ bail:
+       trace_ocfs2_get_parent_end(parent);
+ 
+diff --git a/include/linux/list.h b/include/linux/list.h
+index 4b129df4d46b..de04cc5ed536 100644
+--- a/include/linux/list.h
++++ b/include/linux/list.h
+@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
+               __list_cut_position(list, head, entry);
+ }
+ 
++/**
++ * list_cut_before - cut a list into two, before given entry
++ * @list: a new list to add all removed entries
++ * @head: a list with entries
++ * @entry: an entry within head, could be the head itself
++ *
++ * This helper moves the initial part of @head, up to but
++ * excluding @entry, from @head to @list.  You should pass
++ * in @entry an element you know is on @head.  @list should
++ * be an empty list or a list you do not care about losing
++ * its data.
++ * If @entry == @head, all entries on @head are moved to
++ * @list.
++ */
++static inline void list_cut_before(struct list_head *list,
++                                 struct list_head *head,
++                                 struct list_head *entry)
++{
++      if (head->next == entry) {
++              INIT_LIST_HEAD(list);
++              return;
++      }
++      list->next = head->next;
++      list->next->prev = list;
++      list->prev = entry->prev;
++      list->prev->next = list;
++      head->next = entry;
++      entry->prev = head;
++}
++
+ static inline void __list_splice(const struct list_head *list,
+                                struct list_head *prev,
+                                struct list_head *next)
+diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
+index 5d42859cb441..844fc2973392 100644
+--- a/include/linux/mfd/da9063/registers.h
++++ b/include/linux/mfd/da9063/registers.h
+@@ -215,9 +215,9 @@
+ 
+ /* DA9063 Configuration registers */
+ /* OTP */
+-#define       DA9063_REG_OPT_COUNT            0x101
+-#define       DA9063_REG_OPT_ADDR             0x102
+-#define       DA9063_REG_OPT_DATA             0x103
++#define       DA9063_REG_OTP_CONT             0x101
++#define       DA9063_REG_OTP_ADDR             0x102
++#define       DA9063_REG_OTP_DATA             0x103
+ 
+ /* Customer Trim and Configuration */
+ #define       DA9063_REG_T_OFFSET             0x104
+diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
+index ad2a9a852aea..b4fd5a7c2aaa 100644
+--- a/include/linux/mfd/max77620.h
++++ b/include/linux/mfd/max77620.h
+@@ -136,8 +136,8 @@
+ #define MAX77620_FPS_PERIOD_MIN_US            40
+ #define MAX20024_FPS_PERIOD_MIN_US            20
+ 
+-#define MAX77620_FPS_PERIOD_MAX_US            2560
+-#define MAX20024_FPS_PERIOD_MAX_US            5120
++#define MAX20024_FPS_PERIOD_MAX_US            2560
++#define MAX77620_FPS_PERIOD_MAX_US            5120
+ 
+ #define MAX77620_REG_FPS_GPIO1                        0x54
+ #define MAX77620_REG_FPS_GPIO2                        0x55
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6d6ce2c3a364..a5bb8fad5475 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -790,6 +790,15 @@ static void mm_init_aio(struct mm_struct *mm)
+ #endif
+ }
+ 
++static __always_inline void mm_clear_owner(struct mm_struct *mm,
++                                         struct task_struct *p)
++{
++#ifdef CONFIG_MEMCG
++      if (mm->owner == p)
++              WRITE_ONCE(mm->owner, NULL);
++#endif
++}
++
+ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+ {
+ #ifdef CONFIG_MEMCG
+@@ -1211,6 +1220,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
+ free_pt:
+       /* don't put binfmt in mmput, we haven't got module yet */
+       mm->binfmt = NULL;
++      mm_init_owner(mm, NULL);
+       mmput(mm);
+ 
+ fail_nomem:
+@@ -1528,6 +1538,21 @@ static inline void rcu_copy_process(struct task_struct *p)
+ #endif /* #ifdef CONFIG_TASKS_RCU */
+ }
+ 
++static void __delayed_free_task(struct rcu_head *rhp)
++{
++      struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
++
++      free_task(tsk);
++}
++
++static __always_inline void delayed_free_task(struct task_struct *tsk)
++{
++      if (IS_ENABLED(CONFIG_MEMCG))
++              call_rcu(&tsk->rcu, __delayed_free_task);
++      else
++              free_task(tsk);
++}
++
+ /*
+  * This creates a new process as a copy of the old one,
+  * but does not actually start it yet.
+@@ -1960,8 +1985,10 @@ bad_fork_cleanup_io:
+ bad_fork_cleanup_namespaces:
+       exit_task_namespaces(p);
+ bad_fork_cleanup_mm:
+-      if (p->mm)
++      if (p->mm) {
++              mm_clear_owner(p->mm, p);
+               mmput(p->mm);
++      }
+ bad_fork_cleanup_signal:
+       if (!(clone_flags & CLONE_THREAD))
+               free_signal_struct(p->signal);
+@@ -1992,7 +2019,7 @@ bad_fork_cleanup_count:
+ bad_fork_free:
+       p->state = TASK_DEAD;
+       put_task_stack(p);
+-      free_task(p);
++      delayed_free_task(p);
+ fork_out:
+       return ERR_PTR(retval);
+ }
+diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
+index c75017326c37..3f5be624c764 100644
+--- a/kernel/locking/rwsem-xadd.c
++++ b/kernel/locking/rwsem-xadd.c
+@@ -130,6 +130,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
+ {
+       struct rwsem_waiter *waiter, *tmp;
+       long oldcount, woken = 0, adjustment = 0;
++      struct list_head wlist;
+ 
+       /*
+        * Take a peek at the queue head waiter such that we can determine
+@@ -188,18 +189,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
+        * of the queue. We know that woken will be at least 1 as we accounted
+        * for above. Note we increment the 'active part' of the count by the
+        * number of readers before waking any processes up.
++       *
++       * We have to do wakeup in 2 passes to prevent the possibility that
++       * the reader count may be decremented before it is incremented. It
++       * is because the to-be-woken waiter may not have slept yet. So it
++       * may see waiter->task got cleared, finish its critical section and
++       * do an unlock before the reader count increment.
++       *
++       * 1) Collect the read-waiters in a separate list, count them and
++       *    fully increment the reader count in rwsem.
++       * 2) For each waiters in the new list, clear waiter->task and
++       *    put them into wake_q to be woken up later.
+        */
+-      list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
+-              struct task_struct *tsk;
+-
++      list_for_each_entry(waiter, &sem->wait_list, list) {
+               if (waiter->type == RWSEM_WAITING_FOR_WRITE)
+                       break;
+ 
+               woken++;
+-              tsk = waiter->task;
++      }
++      list_cut_before(&wlist, &sem->wait_list, &waiter->list);
++
++      adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
++      if (list_empty(&sem->wait_list)) {
++              /* hit end of list above */
++              adjustment -= RWSEM_WAITING_BIAS;
++      }
++
++      if (adjustment)
++              atomic_long_add(adjustment, &sem->count);
++
++      /* 2nd pass */
++      list_for_each_entry_safe(waiter, tmp, &wlist, list) {
++              struct task_struct *tsk;
+ 
++              tsk = waiter->task;
+               get_task_struct(tsk);
+-              list_del(&waiter->list);
++
+               /*
+                * Ensure calling get_task_struct() before setting the reader
+                * waiter to nil such that rwsem_down_read_failed() cannot
+@@ -215,15 +240,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
+               /* wake_q_add() already take the task ref */
+               put_task_struct(tsk);
+       }
+-
+-      adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+-      if (list_empty(&sem->wait_list)) {
+-              /* hit end of list above */
+-              adjustment -= RWSEM_WAITING_BIAS;
+-      }
+-
+-      if (adjustment)
+-              atomic_long_add(adjustment, &sem->count);
+ }
+ 
+ /*
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 1c1c06ddc20a..7b2fd5f251f2 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -687,8 +687,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+ 
+ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+ {
+-      struct page *head = compound_head(page);
+-      size_t v = n + offset + page_address(page) - page_address(head);
++      struct page *head;
++      size_t v = n + offset;
++
++      /*
++       * The general case needs to access the page order in order
++       * to compute the page size.
++       * However, we mostly deal with order-0 pages and thus can
++       * avoid a possible cache line miss for requests that fit all
++       * page orders.
++       */
++      if (n <= v && v <= PAGE_SIZE)
++              return true;
++
++      head = compound_head(page);
++      v += (page - head) << PAGE_SHIFT;
+ 
+       if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+               return true;
+diff --git a/mm/mincore.c b/mm/mincore.c
+index fc37afe226e6..2732c8c0764c 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -169,6 +169,22 @@ out:
+       return 0;
+ }
+ 
++static inline bool can_do_mincore(struct vm_area_struct *vma)
++{
++      if (vma_is_anonymous(vma))
++              return true;
++      if (!vma->vm_file)
++              return false;
++      /*
++       * Reveal pagecache information only for non-anonymous mappings that
++       * correspond to the files the calling process could (if tried) open
++       * for writing; otherwise we'd be including shared non-exclusive
++       * mappings, which opens a side channel.
++       */
++      return inode_owner_or_capable(file_inode(vma->vm_file)) ||
++              inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
++}
++
+ /*
+  * Do a chunk of "sys_mincore()". We've already checked
+  * all the arguments, we hold the mmap semaphore: we should
+@@ -189,8 +205,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
+       vma = find_vma(current->mm, addr);
+       if (!vma || addr < vma->vm_start)
+               return -ENOMEM;
+-      mincore_walk.mm = vma->vm_mm;
+       end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
++      if (!can_do_mincore(vma)) {
++              unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
++              memset(vec, 1, pages);
++              return pages;
++      }
++      mincore_walk.mm = vma->vm_mm;
+       err = walk_page_range(addr, end, &mincore_walk);
+       if (err < 0)
+               return err;
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 5229d1cf51fd..b08c69730a72 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -564,6 +564,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
+       }
+ 
+       if (rule_exists(ops, frh, tb, rule)) {
++              err = 0;
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       err = -EEXIST;
+               goto errout_free;
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index ffb6aba71998..f5803f9bba9b 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1549,9 +1549,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
+       ret = !repoll || !eld->monitor_present || eld->eld_valid;
+ 
+       jack = snd_hda_jack_tbl_get(codec, pin_nid);
+-      if (jack)
++      if (jack) {
+               jack->block_report = !ret;
+-
++              jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
++                      AC_PINSENSE_PRESENCE : 0;
++      }
+       mutex_unlock(&per_pin->lock);
+       return ret;
+ }
+@@ -1661,6 +1663,11 @@ static void hdmi_repoll_eld(struct work_struct *work)
+       container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+       struct hda_codec *codec = per_pin->codec;
+       struct hdmi_spec *spec = codec->spec;
++      struct hda_jack_tbl *jack;
++
++      jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
++      if (jack)
++              jack->jack_dirty = 1;
+ 
+       if (per_pin->repoll_count++ > 6)
+               per_pin->repoll_count = 0;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f44d08fe20fc..dc62c800cba4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -781,11 +781,10 @@ static int alc_init(struct hda_codec *codec)
+       if (spec->init_hook)
+               spec->init_hook(codec);
+ 
++      snd_hda_gen_init(codec);
+       alc_fix_pll(codec);
+       alc_auto_init_amp(codec, spec->init_amp);
+ 
+-      snd_hda_gen_init(codec);
+-
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+ 
+       return 0;
+@@ -6551,7 +6550,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+-      SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
++      SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 13bcfb1ef9b4..cc66ea5cc776 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -1209,14 +1209,14 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
+               &max98090_right_rcv_mixer_controls[0],
+               ARRAY_SIZE(max98090_right_rcv_mixer_controls)),
+ 
+-      SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER,
+-              M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux),
++      SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0,
++              &max98090_linmod_mux),
+ 
+-      SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL,
+-              M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux),
++      SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0,
++              &max98090_mixhplsel_mux),
+ 
+-      SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL,
+-              M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux),
++      SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
++              &max98090_mixhprsel_mux),
+ 
+       SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
+               M98090_HPLEN_SHIFT, 0, NULL, 0),
+diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
+index bd51f3655ee3..06abcd017650 100644
+--- a/sound/soc/codecs/rt5677-spi.c
++++ b/sound/soc/codecs/rt5677-spi.c
+@@ -58,13 +58,15 @@ static DEFINE_MUTEX(spi_mutex);
+  * RT5677_SPI_READ/WRITE_32:  Transfer 4 bytes
+  * RT5677_SPI_READ/WRITE_BURST:       Transfer any multiples of 8 bytes
+  *
+- * For example, reading 260 bytes at 0x60030002 uses the following commands:
+- * 0x60030002 RT5677_SPI_READ_16      2 bytes
++ * Note:
++ * 16 Bit writes and reads are restricted to the address range
++ * 0x18020000 ~ 0x18021000
++ *
++ * For example, reading 256 bytes at 0x60030004 uses the following commands:
+  * 0x60030004 RT5677_SPI_READ_32      4 bytes
+  * 0x60030008 RT5677_SPI_READ_BURST   240 bytes
+  * 0x600300F8 RT5677_SPI_READ_BURST   8 bytes
+  * 0x60030100 RT5677_SPI_READ_32      4 bytes
+- * 0x60030104 RT5677_SPI_READ_16      2 bytes
+  *
+  * Input:
+  * @read: true for read commands; false for write commands
+@@ -79,15 +81,13 @@ static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
+ {
+       u8 cmd;
+ 
+-      if (align == 2 || align == 6 || remain == 2) {
+-              cmd = RT5677_SPI_READ_16;
+-              *len = 2;
+-      } else if (align == 4 || remain <= 6) {
++      if (align == 4 || remain <= 4) {
+               cmd = RT5677_SPI_READ_32;
+               *len = 4;
+       } else {
+               cmd = RT5677_SPI_READ_BURST;
+-              *len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
++              *len = (((remain - 1) >> 3) + 1) << 3;
++              *len = min_t(u32, *len, RT5677_SPI_BURST_LEN);
+       }
+       return read ? cmd : cmd + 1;
+ }
+@@ -108,7 +108,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
+       }
+ }
+ 
+-/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
++/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. */
+ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+ {
+       u32 offset;
+@@ -124,7 +124,7 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+       if (!g_spi)
+               return -ENODEV;
+ 
+-      if ((addr & 1) || (len & 1)) {
++      if ((addr & 3) || (len & 3)) {
+               dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
+               return -EACCES;
+       }
+@@ -159,13 +159,13 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+ }
+ EXPORT_SYMBOL_GPL(rt5677_spi_read);
+ 
+-/* Write DSP address space using SPI. addr has to be 2-byte aligned.
+- * If len is not 2-byte aligned, an extra byte of zero is written at the end
++/* Write DSP address space using SPI. addr has to be 4-byte aligned.
++ * If len is not 4-byte aligned, then extra zeros are written at the end
+  * as padding.
+  */
+ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
+ {
+-      u32 offset, len_with_pad = len;
++      u32 offset;
+       int status = 0;
+       struct spi_transfer t;
+       struct spi_message m;
+@@ -178,22 +178,19 @@ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
+       if (!g_spi)
+               return -ENODEV;
+ 
+-      if (addr & 1) {
++      if (addr & 3) {
+               dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
+               return -EACCES;
+       }
+ 
+-      if (len & 1)
+-              len_with_pad = len + 1;
+-
+       memset(&t, 0, sizeof(t));
+       t.tx_buf = buf;
+       t.speed_hz = RT5677_SPI_FREQ;
+       spi_message_init_with_transfers(&m, &t, 1);
+ 
+-      for (offset = 0; offset < len_with_pad;) {
++      for (offset = 0; offset < len;) {
+               spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
+-                              len_with_pad - offset, &t.len);
++                              len - offset, &t.len);
+ 
+               /* Construct SPI message header */
+               buf[0] = spi_cmd;
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index b3be0d432a75..3d93e33b3485 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2184,6 +2184,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
+       kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
+       if (! kctl) {
+               usb_audio_err(state->chip, "cannot malloc kcontrol\n");
++              for (i = 0; i < desc->bNrInPins; i++)
++                      kfree(namelist[i]);
+               kfree(namelist);
+               kfree(cval);
+               return -ENOMEM;
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 3ff025b64527..ae3446768181 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1779,7 +1779,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
+                       return 1;
+               }
+ 
+-              func = insn->func ? insn->func->pfunc : NULL;
++              if (insn->func)
++                      func = insn->func->pfunc;
+ 
+               if (func && insn->ignore) {
+               WARN_FUNC("BUG: why am I validating an ignored function?",

Reply via email to