commit:     9a00dcdc364ac5b848d781e0c072bdef6dd84956
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 23 11:57:36 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 23 11:57:36 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a00dcdc

Linux patch 4.19.236

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1235_linux-4.19.236.patch | 2743 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2747 insertions(+)

diff --git a/0000_README b/0000_README
index 97b02019..0e8f7f4a 100644
--- a/0000_README
+++ b/0000_README
@@ -979,6 +979,10 @@ Patch:  1234_linux-4.19.235.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.235
 
+Patch:  1235_linux-4.19.236.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.236
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1235_linux-4.19.236.patch b/1235_linux-4.19.236.patch
new file mode 100644
index 00000000..ab82a60a
--- /dev/null
+++ b/1235_linux-4.19.236.patch
@@ -0,0 +1,2743 @@
+diff --git a/Makefile b/Makefile
+index 6bfb0a18ee8ae..d83513c135206 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 235
++SUBLEVEL = 236
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 2ff81f3736c85..e442bf7427ae1 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -957,7 +957,7 @@
+               status = "disabled";
+       };
+ 
+-      crypto: cypto-controller@ff8a0000 {
++      crypto: crypto@ff8a0000 {
+               compatible = "rockchip,rk3288-crypto";
+               reg = <0x0 0xff8a0000 0x0 0x4000>;
+               interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index ae073fceb3f05..db20534912cdd 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -26,6 +26,7 @@
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+ #include <asm/fpstate.h>
++#include <asm/spectre.h>
+ #include <kvm/arm_arch_timer.h>
+ 
+ #define __KVM_HAVE_ARCH_INTC_INITIALIZED
+@@ -367,4 +368,10 @@ void kvm_arch_free_vm(struct kvm *kvm);
+ 
+ #define kvm_arm_vcpu_loaded(vcpu)     (false)
+ 
++static inline int kvm_arm_get_spectre_bhb_state(void)
++{
++      /* 32bit guests don't need firmware for this */
++      return SPECTRE_VULNERABLE; /* aka SMCCC_RET_NOT_SUPPORTED */
++}
++
+ #endif /* __ARM_KVM_HOST_H__ */
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 1daefa57e2742..a101f5d2fbed4 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -979,6 +979,15 @@ config ARM64_SSBD
+ 
+         If unsure, say Y.
+ 
++config MITIGATE_SPECTRE_BRANCH_HISTORY
++      bool "Mitigate Spectre style attacks against branch history" if EXPERT
++      default y
++      help
++        Speculation attacks against some high-performance processors can
++        make use of branch history to influence future speculation.
++        When taking an exception from user-space, a sequence of branches
++        or a firmware call overwrites the branch history.
++
+ menuconfig ARMV8_DEPRECATED
+       bool "Emulate deprecated/obsolete ARMv8 instructions"
+       depends on COMPAT
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index b155f657292bd..ce1320e4c1060 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -468,6 +468,12 @@
+ };
+ 
+ &sdhci {
++      /*
++       * Signal integrity isn't great at 200MHz but 100MHz has proven stable
++       * enough.
++       */
++      max-frequency = <100000000>;
++
+       bus-width = <8>;
+       mmc-hs400-1_8v;
+       mmc-hs400-enhanced-strobe;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index f70c053326865..5a60faa8e9998 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1686,10 +1686,10 @@
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
+               clocks = <&cru PCLK_HDMI_CTRL>,
+                        <&cru SCLK_HDMI_SFR>,
+-                       <&cru PLL_VPLL>,
++                       <&cru SCLK_HDMI_CEC>,
+                        <&cru PCLK_VIO_GRF>,
+-                       <&cru SCLK_HDMI_CEC>;
+-              clock-names = "iahb", "isfr", "vpll", "grf", "cec";
++                       <&cru PLL_VPLL>;
++              clock-names = "iahb", "isfr", "cec", "grf", "vpll";
+               power-domains = <&power RK3399_PD_HDCP>;
+               reg-io-width = <4>;
+               rockchip,grf = <&grf>;
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 5a97ac8531682..fc3d26c954a40 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -126,6 +126,13 @@
+       hint    #20
+       .endm
+ 
++/*
++ * Clear Branch History instruction
++ */
++      .macro clearbhb
++      hint    #22
++      .endm
++
+ /*
+  * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
+  * of bounds.
+@@ -711,4 +718,31 @@ USER(\label, ic   ivau, \tmp2)                    // invalidate I line PoU
+ .Lyield_out_\@ :
+       .endm
+ 
++      .macro __mitigate_spectre_bhb_loop      tmp
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++alternative_cb  spectre_bhb_patch_loop_iter
++      mov     \tmp, #32               // Patched to correct the immediate
++alternative_cb_end
++.Lspectre_bhb_loop\@:
++      b       . + 4
++      subs    \tmp, \tmp, #1
++      b.ne    .Lspectre_bhb_loop\@
++      dsb     nsh
++      isb
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++      .endm
++
++      /* Save/restores x0-x3 to the stack */
++      .macro __mitigate_spectre_bhb_fw
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      stp     x0, x1, [sp, #-16]!
++      stp     x2, x3, [sp, #-16]!
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
++alternative_cb        arm64_update_smccc_conduit
++      nop                                     // Patched to SMC/HVC #0
++alternative_cb_end
++      ldp     x2, x3, [sp], #16
++      ldp     x0, x1, [sp], #16
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++      .endm
+ #endif        /* __ASM_ASSEMBLER_H */
+diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
+index 88392272250e8..3a9908a012190 100644
+--- a/arch/arm64/include/asm/cpu.h
++++ b/arch/arm64/include/asm/cpu.h
+@@ -36,6 +36,7 @@ struct cpuinfo_arm64 {
+       u64             reg_id_aa64dfr1;
+       u64             reg_id_aa64isar0;
+       u64             reg_id_aa64isar1;
++      u64             reg_id_aa64isar2;
+       u64             reg_id_aa64mmfr0;
+       u64             reg_id_aa64mmfr1;
+       u64             reg_id_aa64mmfr2;
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index df8fe8ecc37e1..64ae14371cae9 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -54,7 +54,8 @@
+ #define ARM64_WORKAROUND_1463225              33
+ #define ARM64_SSBS                            34
+ #define ARM64_WORKAROUND_1542419              35
++#define ARM64_SPECTRE_BHB                     36
+ 
+-#define ARM64_NCAPS                           36
++#define ARM64_NCAPS                           37
+ 
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index dda6e50568107..05f41d8f7db31 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -482,6 +482,34 @@ static inline bool cpu_supports_mixed_endian_el0(void)
+       return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+ }
+ 
++static inline bool supports_csv2p3(int scope)
++{
++      u64 pfr0;
++      u8 csv2_val;
++
++      if (scope == SCOPE_LOCAL_CPU)
++              pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
++      else
++              pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++
++      csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
++                                                      ID_AA64PFR0_CSV2_SHIFT);
++      return csv2_val == 3;
++}
++
++static inline bool supports_clearbhb(int scope)
++{
++      u64 isar2;
++
++      if (scope == SCOPE_LOCAL_CPU)
++              isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
++      else
++              isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
++
++      return cpuid_feature_extract_unsigned_field(isar2,
++                                                  ID_AA64ISAR2_CLEARBHB_SHIFT);
++}
++
+ static inline bool system_supports_32bit_el0(void)
+ {
+       return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+@@ -527,6 +555,17 @@ static inline int arm64_get_ssbd_state(void)
+ 
+ void arm64_set_ssbd_mitigation(bool state);
+ 
++/* Watch out, ordering is important here. */
++enum mitigation_state {
++      SPECTRE_UNAFFECTED,
++      SPECTRE_MITIGATED,
++      SPECTRE_VULNERABLE,
++};
++
++enum mitigation_state arm64_get_spectre_bhb_state(void);
++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++u8 spectre_bhb_loop_affected(int scope);
++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 3cd936b1c79c1..50368f9622139 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -81,6 +81,14 @@
+ #define ARM_CPU_PART_CORTEX_A55               0xD05
+ #define ARM_CPU_PART_CORTEX_A76               0xD0B
+ #define ARM_CPU_PART_NEOVERSE_N1      0xD0C
++#define ARM_CPU_PART_CORTEX_A77               0xD0D
++#define ARM_CPU_PART_NEOVERSE_V1      0xD40
++#define ARM_CPU_PART_CORTEX_A78               0xD41
++#define ARM_CPU_PART_CORTEX_X1                0xD44
++#define ARM_CPU_PART_CORTEX_A710      0xD47
++#define ARM_CPU_PART_CORTEX_X2                0xD48
++#define ARM_CPU_PART_NEOVERSE_N2      0xD49
++#define ARM_CPU_PART_CORTEX_A78C      0xD4B
+ 
+ #define APM_CPU_PART_POTENZA          0x000
+ 
+@@ -109,6 +117,14 @@
+ #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+ #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
++#define MIDR_CORTEX_A77       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
++#define MIDR_NEOVERSE_V1      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
++#define MIDR_CORTEX_A78       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
++#define MIDR_CORTEX_X1        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
++#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
++#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
++#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
++#define MIDR_CORTEX_A78C      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
+index ec1e6d6fa14cc..3c962ef081f84 100644
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -59,9 +59,11 @@ enum fixed_addresses {
+ #endif /* CONFIG_ACPI_APEI_GHES */
+ 
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      FIX_ENTRY_TRAMP_TEXT3,
++      FIX_ENTRY_TRAMP_TEXT2,
++      FIX_ENTRY_TRAMP_TEXT1,
+       FIX_ENTRY_TRAMP_DATA,
+-      FIX_ENTRY_TRAMP_TEXT,
+-#define TRAMP_VALIAS          (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
++#define TRAMP_VALIAS          (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+       __end_of_permanent_fixed_addresses,
+ 
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 07472c138ced9..7e1b19ea07ea6 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -542,4 +542,9 @@ void kvm_arch_free_vm(struct kvm *kvm);
+ 
+ #define kvm_arm_vcpu_loaded(vcpu)     ((vcpu)->arch.sysregs_loaded_on_cpu)
+ 
++static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void)
++{
++      return arm64_get_spectre_bhb_state();
++}
++
+ #endif /* __ARM64_KVM_HOST_H__ */
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index b2558447c67d7..44d3fdbcdf629 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -438,7 +438,8 @@ static inline void *kvm_get_hyp_vector(void)
+       void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+       int slot = -1;
+ 
+-      if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
++      if ((cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) ||
++           cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
+               vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+               slot = data->hyp_vectors_slot;
+       }
+@@ -467,7 +468,8 @@ static inline int kvm_map_vectors(void)
+        * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
+        *  HBP +  HEL2 -> use hardened vertors and use exec mapping
+        */
+-      if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
++      if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) ||
++          cpus_have_const_cap(ARM64_SPECTRE_BHB)) {
+               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+       }
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
+index dd320df0d0269..b37d185e0e841 100644
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -38,7 +38,7 @@ typedef struct {
+  */
+ #define ASID(mm)      ((mm)->context.id.counter & 0xffff)
+ 
+-static inline bool arm64_kernel_unmapped_at_el0(void)
++static __always_inline bool arm64_kernel_unmapped_at_el0(void)
+ {
+       return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+              cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+@@ -49,6 +49,12 @@ typedef void (*bp_hardening_cb_t)(void);
+ struct bp_hardening_data {
+       int                     hyp_vectors_slot;
+       bp_hardening_cb_t       fn;
++
++      /*
++       * template_start is only used by the BHB mitigation to identify the
++       * hyp_vectors_slot sequence.
++       */
++      const char *template_start;
+ };
+ 
+ #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||       \
+diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
+index caab039d63055..8d3f1eab58e04 100644
+--- a/arch/arm64/include/asm/sections.h
++++ b/arch/arm64/include/asm/sections.h
+@@ -30,4 +30,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[];
+ extern char __mmuoff_data_start[], __mmuoff_data_end[];
+ extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+ 
++static inline size_t entry_tramp_text_size(void)
++{
++      return __entry_tramp_text_end - __entry_tramp_text_start;
++}
++
+ #endif /* __ASM_SECTIONS_H */
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index ed99d941c4623..e90cf51b87eca 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -161,6 +161,7 @@
+ 
+ #define SYS_ID_AA64ISAR0_EL1          sys_reg(3, 0, 0, 6, 0)
+ #define SYS_ID_AA64ISAR1_EL1          sys_reg(3, 0, 0, 6, 1)
++#define SYS_ID_AA64ISAR2_EL1          sys_reg(3, 0, 0, 6, 2)
+ 
+ #define SYS_ID_AA64MMFR0_EL1          sys_reg(3, 0, 0, 7, 0)
+ #define SYS_ID_AA64MMFR1_EL1          sys_reg(3, 0, 0, 7, 1)
+@@ -526,6 +527,9 @@
+ #define ID_AA64ISAR1_JSCVT_SHIFT      12
+ #define ID_AA64ISAR1_DPB_SHIFT                0
+ 
++/* id_aa64isar2 */
++#define ID_AA64ISAR2_CLEARBHB_SHIFT   28
++
+ /* id_aa64pfr0 */
+ #define ID_AA64PFR0_CSV3_SHIFT                60
+ #define ID_AA64PFR0_CSV2_SHIFT                56
+@@ -583,6 +587,7 @@
+ #endif
+ 
+ /* id_aa64mmfr1 */
++#define ID_AA64MMFR1_ECBHB_SHIFT      60
+ #define ID_AA64MMFR1_PAN_SHIFT                20
+ #define ID_AA64MMFR1_LOR_SHIFT                16
+ #define ID_AA64MMFR1_HPD_SHIFT                12
+diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
+new file mode 100644
+index 0000000000000..695583b9a145b
+--- /dev/null
++++ b/arch/arm64/include/asm/vectors.h
+@@ -0,0 +1,74 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2022 ARM Ltd.
++ */
++#ifndef __ASM_VECTORS_H
++#define __ASM_VECTORS_H
++
++#include <linux/bug.h>
++#include <linux/percpu.h>
++
++#include <asm/fixmap.h>
++#include <asm/mmu.h>
++
++extern char vectors[];
++extern char tramp_vectors[];
++extern char __bp_harden_el1_vectors[];
++
++/*
++ * Note: the order of this enum corresponds to two arrays in entry.S:
++ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
++ * 'full fat' vectors are used directly.
++ */
++enum arm64_bp_harden_el1_vectors {
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      /*
++       * Perform the BHB loop mitigation, before branching to the canonical
++       * vectors.
++       */
++      EL1_VECTOR_BHB_LOOP,
++
++      /*
++       * Make the SMC call for firmware mitigation, before branching to the
++       * canonical vectors.
++       */
++      EL1_VECTOR_BHB_FW,
++
++      /*
++       * Use the ClearBHB instruction, before branching to the canonical
++       * vectors.
++       */
++      EL1_VECTOR_BHB_CLEAR_INSN,
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++
++      /*
++       * Remap the kernel before branching to the canonical vectors.
++       */
++      EL1_VECTOR_KPTI,
++};
++
++#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++#define EL1_VECTOR_BHB_LOOP           -1
++#define EL1_VECTOR_BHB_FW             -1
++#define EL1_VECTOR_BHB_CLEAR_INSN     -1
++#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++
++/* The vectors to use on return from EL0. e.g. to remap the kernel */
++DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
++
++#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
++#define TRAMP_VALIAS  0
++#endif
++
++static inline const char *
++arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
++{
++      if (arm64_kernel_unmapped_at_el0())
++              return (char *)TRAMP_VALIAS + SZ_2K * slot;
++
++      WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
++
++      return __bp_harden_el1_vectors + SZ_2K * slot;
++}
++
++#endif /* __ASM_VECTORS_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index d191ce8410dba..d0b7dd60861bc 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -24,6 +24,7 @@
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
+ #include <asm/smp_plat.h>
++#include <asm/vectors.h>
+ 
+ static bool __maybe_unused
+ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+@@ -97,6 +98,16 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+ #ifdef CONFIG_KVM_INDIRECT_VECTORS
+ extern char __smccc_workaround_1_smc_start[];
+ extern char __smccc_workaround_1_smc_end[];
++extern char __smccc_workaround_3_smc_start[];
++extern char __smccc_workaround_3_smc_end[];
++extern char __spectre_bhb_loop_k8_start[];
++extern char __spectre_bhb_loop_k8_end[];
++extern char __spectre_bhb_loop_k24_start[];
++extern char __spectre_bhb_loop_k24_end[];
++extern char __spectre_bhb_loop_k32_start[];
++extern char __spectre_bhb_loop_k32_end[];
++extern char __spectre_bhb_clearbhb_start[];
++extern char __spectre_bhb_clearbhb_end[];
+ 
+ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+                               const char *hyp_vecs_end)
+@@ -110,11 +121,11 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+       __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ }
+ 
++static DEFINE_SPINLOCK(bp_lock);
+ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                   const char *hyp_vecs_start,
+                                   const char *hyp_vecs_end)
+ {
+-      static DEFINE_SPINLOCK(bp_lock);
+       int cpu, slot = -1;
+ 
+       spin_lock(&bp_lock);
+@@ -133,6 +144,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+ 
+       __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+       __this_cpu_write(bp_hardening_data.fn, fn);
++      __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+       spin_unlock(&bp_lock);
+ }
+ #else
+@@ -839,6 +851,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+               .matches = has_ssbd_mitigation,
+               .midr_range_list = arm64_ssb_cpus,
+       },
++      {
++              .desc = "Spectre-BHB",
++              .capability = ARM64_SPECTRE_BHB,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++              .matches = is_spectre_bhb_affected,
++              .cpu_enable = spectre_bhb_enable_mitigation,
++      },
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+       {
+               .desc = "ARM erratum 1463225",
+@@ -875,14 +894,39 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ }
+ 
++static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
++{
++      switch (bhb_state) {
++      case SPECTRE_UNAFFECTED:
++              return "";
++      default:
++      case SPECTRE_VULNERABLE:
++              return ", but not BHB";
++      case SPECTRE_MITIGATED:
++              return ", BHB";
++      }
++}
++
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+               char *buf)
+ {
+-      if (__spectrev2_safe)
+-              return sprintf(buf, "Not affected\n");
++      enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
++      const char *bhb_str = get_bhb_affected_string(bhb_state);
++      const char *v2_str = "Branch predictor hardening";
++
++      if (__spectrev2_safe) {
++              if (bhb_state == SPECTRE_UNAFFECTED)
++                      return sprintf(buf, "Not affected\n");
++
++              /*
++               * Platforms affected by Spectre-BHB can't report
++               * "Not affected" for Spectre-v2.
++               */
++              v2_str = "CSV2";
++      }
+ 
+       if (__hardenbp_enab)
+-              return sprintf(buf, "Mitigation: Branch predictor hardening\n");
++              return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
+ 
+       return sprintf(buf, "Vulnerable\n");
+ }
+@@ -903,3 +947,332 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ 
+       return sprintf(buf, "Vulnerable\n");
+ }
++
++/*
++ * We try to ensure that the mitigation state can never change as the result of
++ * onlining a late CPU.
++ */
++static void update_mitigation_state(enum mitigation_state *oldp,
++                                  enum mitigation_state new)
++{
++      enum mitigation_state state;
++
++      do {
++              state = READ_ONCE(*oldp);
++              if (new <= state)
++                      break;
++      } while (cmpxchg_relaxed(oldp, state, new) != state);
++}
++
++/*
++ * Spectre BHB.
++ *
++ * A CPU is either:
++ * - Mitigated by a branchy loop a CPU specific number of times, and listed
++ *   in our "loop mitigated list".
++ * - Mitigated in software by the firmware Spectre v2 call.
++ * - Has the ClearBHB instruction to perform the mitigation.
++ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
++ *   software mitigation in the vectors is needed.
++ * - Has CSV2.3, so is unaffected.
++ */
++static enum mitigation_state spectre_bhb_state;
++
++enum mitigation_state arm64_get_spectre_bhb_state(void)
++{
++      return spectre_bhb_state;
++}
++
++/*
++ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
++ * SCOPE_SYSTEM call will give the right answer.
++ */
++u8 spectre_bhb_loop_affected(int scope)
++{
++      u8 k = 0;
++      static u8 max_bhb_k;
++
++      if (scope == SCOPE_LOCAL_CPU) {
++              static const struct midr_range spectre_bhb_k32_list[] = {
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
++                      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++                      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
++                      {},
++              };
++              static const struct midr_range spectre_bhb_k24_list[] = {
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
++                      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
++                      {},
++              };
++              static const struct midr_range spectre_bhb_k8_list[] = {
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++                      {},
++              };
++
++              if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
++                      k = 32;
++              else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
++                      k = 24;
++              else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
++                      k =  8;
++
++              max_bhb_k = max(max_bhb_k, k);
++      } else {
++              k = max_bhb_k;
++      }
++
++      return k;
++}
++
++static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
++{
++      int ret;
++      struct arm_smccc_res res;
++
++      if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++              return SPECTRE_VULNERABLE;
++
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_HVC:
++              arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                ARM_SMCCC_ARCH_WORKAROUND_3, &res);
++              break;
++
++      case PSCI_CONDUIT_SMC:
++              arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                ARM_SMCCC_ARCH_WORKAROUND_3, &res);
++              break;
++
++      default:
++              return SPECTRE_VULNERABLE;
++      }
++
++      ret = res.a0;
++      switch (ret) {
++      case SMCCC_RET_SUCCESS:
++              return SPECTRE_MITIGATED;
++      case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++              return SPECTRE_UNAFFECTED;
++      default:
++      case SMCCC_RET_NOT_SUPPORTED:
++              return SPECTRE_VULNERABLE;
++      }
++}
++
++static bool is_spectre_bhb_fw_affected(int scope)
++{
++      static bool system_affected;
++      enum mitigation_state fw_state;
++      bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
++      static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
++              {},
++      };
++      bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
++                                       spectre_bhb_firmware_mitigated_list);
++
++      if (scope != SCOPE_LOCAL_CPU)
++              return system_affected;
++
++      fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
++      if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
++              system_affected = true;
++              return true;
++      }
++
++      return false;
++}
++
++static bool supports_ecbhb(int scope)
++{
++      u64 mmfr1;
++
++      if (scope == SCOPE_LOCAL_CPU)
++              mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
++      else
++              mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
++
++      return cpuid_feature_extract_unsigned_field(mmfr1,
++                                                  ID_AA64MMFR1_ECBHB_SHIFT);
++}
++
++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
++                           int scope)
++{
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++      if (supports_csv2p3(scope))
++              return false;
++
++      if (supports_clearbhb(scope))
++              return true;
++
++      if (spectre_bhb_loop_affected(scope))
++              return true;
++
++      if (is_spectre_bhb_fw_affected(scope))
++              return true;
++
++      return false;
++}
++
++static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
++{
++      const char *v = arm64_get_bp_hardening_vector(slot);
++
++      if (slot < 0)
++              return;
++
++      __this_cpu_write(this_cpu_vector, v);
++
++      /*
++       * When KPTI is in use, the vectors are switched when exiting to
++       * user-space.
++       */
++      if (arm64_kernel_unmapped_at_el0())
++              return;
++
++      write_sysreg(v, vbar_el1);
++      isb();
++}
++
++#ifdef CONFIG_KVM_INDIRECT_VECTORS
++static const char *kvm_bhb_get_vecs_end(const char *start)
++{
++      if (start == __smccc_workaround_3_smc_start)
++              return __smccc_workaround_3_smc_end;
++      else if (start == __spectre_bhb_loop_k8_start)
++              return __spectre_bhb_loop_k8_end;
++      else if (start == __spectre_bhb_loop_k24_start)
++              return __spectre_bhb_loop_k24_end;
++      else if (start == __spectre_bhb_loop_k32_start)
++              return __spectre_bhb_loop_k32_end;
++      else if (start == __spectre_bhb_clearbhb_start)
++              return __spectre_bhb_clearbhb_end;
++
++      return NULL;
++}
++
++static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
++{
++      int cpu, slot = -1;
++      const char *hyp_vecs_end;
++
++      if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
++              return;
++
++      hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
++      if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
++              return;
++
++      spin_lock(&bp_lock);
++      for_each_possible_cpu(cpu) {
++              if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
++                      slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
++                      break;
++              }
++      }
++
++      if (slot == -1) {
++              slot = atomic_inc_return(&arm64_el2_vector_last_slot);
++              BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
++              __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
++      }
++
++      __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
++      __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
++      spin_unlock(&bp_lock);
++}
++#else
++#define __smccc_workaround_3_smc_start NULL
++#define __spectre_bhb_loop_k8_start NULL
++#define __spectre_bhb_loop_k24_start NULL
++#define __spectre_bhb_loop_k32_start NULL
++#define __spectre_bhb_clearbhb_start NULL
++
++static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { };
++#endif
++
++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
++{
++      enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
++
++      if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
++              return;
++
++      if (!__spectrev2_safe &&  !__hardenbp_enab) {
++              /* No point mitigating Spectre-BHB alone. */
++      } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
++              pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
++      } else if (cpu_mitigations_off()) {
++              pr_info_once("spectre-bhb mitigation disabled by command line option\n");
++      } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
++              state = SPECTRE_MITIGATED;
++      } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
++              kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
++              this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
++
++              state = SPECTRE_MITIGATED;
++      } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
++              switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
++              case 8:
++                      kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
++                      break;
++              case 24:
++                      kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
++                      break;
++              case 32:
++                      kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
++                      break;
++              default:
++                      WARN_ON_ONCE(1);
++              }
++              this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
++
++              state = SPECTRE_MITIGATED;
++      } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
++              fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
++              if (fw_state == SPECTRE_MITIGATED) {
++                      kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
++                      this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
++
++                      /*
++                       * With WA3 in the vectors, the WA1 calls can be
++                       * removed.
++                       */
++                      __this_cpu_write(bp_hardening_data.fn, NULL);
++
++                      state = SPECTRE_MITIGATED;
++              }
++      }
++
++      update_mitigation_state(&spectre_bhb_state, state);
++}
++
++/* Patched to correct the immediate */
++void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
++                                      __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++      u8 rd;
++      u32 insn;
++      u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
++
++      BUG_ON(nr_inst != 1); /* MOV -> MOV */
++
++      if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
++              return;
++
++      insn = le32_to_cpu(*origptr);
++      rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
++      insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
++                                       AARCH64_INSN_VARIANT_64BIT,
++                                       AARCH64_INSN_MOVEWIDE_ZERO);
++      *updptr++ = cpu_to_le32(insn);
++}
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 122d5e843ab6d..03b0fdccaf052 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -20,11 +20,13 @@
+ 
+ #include <linux/bsearch.h>
+ #include <linux/cpumask.h>
++#include <linux/percpu.h>
+ #include <linux/sort.h>
+ #include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <linux/mm.h>
+ #include <linux/cpu.h>
++
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_ops.h>
+@@ -33,6 +35,7 @@
+ #include <asm/processor.h>
+ #include <asm/sysreg.h>
+ #include <asm/traps.h>
++#include <asm/vectors.h>
+ #include <asm/virt.h>
+ 
+ unsigned long elf_hwcap __read_mostly;
+@@ -51,6 +54,8 @@ unsigned int compat_elf_hwcap2 __read_mostly;
+ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ EXPORT_SYMBOL(cpu_hwcaps);
+ 
++DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
++
+ /*
+  * Flag to indicate if we have computed the system wide
+  * capabilities based on the boot time active CPUs. This
+@@ -145,6 +150,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+       ARM64_FTR_END,
+ };
+ 
++static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
++      ARM64_FTR_END,
++};
++
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+@@ -391,6 +401,7 @@ static const struct __ftr_reg_entry {
+       /* Op1 = 0, CRn = 0, CRm = 6 */
+       ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
+       ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
++      ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
+ 
+       /* Op1 = 0, CRn = 0, CRm = 7 */
+       ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+@@ -539,6 +550,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+       init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
+       init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
++      init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
+@@ -656,6 +668,8 @@ void update_cpu_features(int cpu,
+                                     info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
+       taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
+                                     info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
++      taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
++                                    info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
+ 
+       /*
+        * Differing PARange support is fine as long as all peripherals and
+@@ -789,6 +803,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
+       read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
+       read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
+       read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
++      read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
+ 
+       read_sysreg_case(SYS_CNTFRQ_EL0);
+       read_sysreg_case(SYS_CTR_EL0);
+@@ -963,6 +978,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+       static bool kpti_applied = false;
+       int cpu = smp_processor_id();
+ 
++      if (__this_cpu_read(this_cpu_vector) == vectors) {
++              const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
++
++              __this_cpu_write(this_cpu_vector, v);
++      }
++
+       if (kpti_applied)
+               return;
+ 
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index dce971f2c1673..36bd58d8ca11f 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -334,6 +334,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+       info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
+       info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+       info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
++      info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+       info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+       info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+       info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 5f800384cb9a8..85433a84783b8 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -70,18 +70,21 @@
+ 
+       .macro kernel_ventry, el, label, regsize = 64
+       .align 7
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
++.Lventry_start\@:
+       .if     \el == 0
++      /*
++       * This must be the first instruction of the EL0 vector entries. It is
++       * skipped by the trampoline vectors, to trigger the cleanup.
++       */
++      b       .Lskip_tramp_vectors_cleanup\@
+       .if     \regsize == 64
+       mrs     x30, tpidrro_el0
+       msr     tpidrro_el0, xzr
+       .else
+       mov     x30, xzr
+       .endif
++.Lskip_tramp_vectors_cleanup\@:
+       .endif
+-alternative_else_nop_endif
+-#endif
+ 
+       sub     sp, sp, #S_FRAME_SIZE
+ #ifdef CONFIG_VMAP_STACK
+@@ -127,11 +130,15 @@ alternative_else_nop_endif
+       mrs     x0, tpidrro_el0
+ #endif
+       b       el\()\el\()_\label
++.org .Lventry_start\@ + 128   // Did we overflow the ventry slot?
+       .endm
+ 
+-      .macro tramp_alias, dst, sym
++      .macro tramp_alias, dst, sym, tmp
+       mov_q   \dst, TRAMP_VALIAS
+-      add     \dst, \dst, #(\sym - .entry.tramp.text)
++      adr_l   \tmp, \sym
++      add     \dst, \dst, \tmp
++      adr_l   \tmp, .entry.tramp.text
++      sub     \dst, \dst, \tmp
+       .endm
+ 
+       // This macro corrupts x0-x3. It is the caller's duty
+@@ -342,25 +349,29 @@ alternative_else_nop_endif
+       ldp     x24, x25, [sp, #16 * 12]
+       ldp     x26, x27, [sp, #16 * 13]
+       ldp     x28, x29, [sp, #16 * 14]
+-      ldr     lr, [sp, #S_LR]
+-      add     sp, sp, #S_FRAME_SIZE           // restore sp
+       /*
+        * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization
+        * when returning from IPI handler, and when returning to user-space.
+        */
+ 
+       .if     \el == 0
+-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
++alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
++      ldr     lr, [sp, #S_LR]
++      add     sp, sp, #S_FRAME_SIZE           // restore sp
++      eret
++alternative_else_nop_endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       bne     4f
+-      msr     far_el1, x30
+-      tramp_alias     x30, tramp_exit_native
++      msr     far_el1, x29
++      tramp_alias     x30, tramp_exit_native, x29
+       br      x30
+ 4:
+-      tramp_alias     x30, tramp_exit_compat
++      tramp_alias     x30, tramp_exit_compat, x29
+       br      x30
+ #endif
+       .else
++      ldr     lr, [sp, #S_LR]
++      add     sp, sp, #S_FRAME_SIZE           // restore sp
+       eret
+       .endif
+       .endm
+@@ -920,12 +931,7 @@ ENDPROC(el0_svc)
+ 
+       .popsection                             // .entry.text
+ 
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-/*
+- * Exception vectors trampoline.
+- */
+-      .pushsection ".entry.tramp.text", "ax"
+-
++      // Move from tramp_pg_dir to swapper_pg_dir
+       .macro tramp_map_kernel, tmp
+       mrs     \tmp, ttbr1_el1
+       add     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+@@ -957,12 +963,47 @@ alternative_else_nop_endif
+        */
+       .endm
+ 
+-      .macro tramp_ventry, regsize = 64
++      .macro tramp_data_page  dst
++      adr_l   \dst, .entry.tramp.text
++      sub     \dst, \dst, PAGE_SIZE
++      .endm
++
++      .macro tramp_data_read_var      dst, var
++#ifdef CONFIG_RANDOMIZE_BASE
++      tramp_data_page         \dst
++      add     \dst, \dst, #:lo12:__entry_tramp_data_\var
++      ldr     \dst, [\dst]
++#else
++      ldr     \dst, =\var
++#endif
++      .endm
++
++#define BHB_MITIGATION_NONE   0
++#define BHB_MITIGATION_LOOP   1
++#define BHB_MITIGATION_FW     2
++#define BHB_MITIGATION_INSN   3
++
++      .macro tramp_ventry, vector_start, regsize, kpti, bhb
+       .align  7
+ 1:
+       .if     \regsize == 64
+       msr     tpidrro_el0, x30        // Restored in kernel_ventry
+       .endif
++
++      .if     \bhb == BHB_MITIGATION_LOOP
++      /*
++       * This sequence must appear before the first indirect branch. i.e. the
++       * ret out of tramp_ventry. It appears here because x30 is free.
++       */
++      __mitigate_spectre_bhb_loop     x30
++      .endif // \bhb == BHB_MITIGATION_LOOP
++
++      .if     \bhb == BHB_MITIGATION_INSN
++      clearbhb
++      isb
++      .endif // \bhb == BHB_MITIGATION_INSN
++
++      .if     \kpti == 1
+       /*
+        * Defend against branch aliasing attacks by pushing a dummy
+        * entry onto the return stack and using a RET instruction to
+@@ -972,43 +1013,75 @@ alternative_else_nop_endif
+       b       .
+ 2:
+       tramp_map_kernel        x30
+-#ifdef CONFIG_RANDOMIZE_BASE
+-      adr     x30, tramp_vectors + PAGE_SIZE
+ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+-      ldr     x30, [x30]
+-#else
+-      ldr     x30, =vectors
+-#endif
+-      prfm    plil1strm, [x30, #(1b - tramp_vectors)]
++      tramp_data_read_var     x30, vectors
++      prfm    plil1strm, [x30, #(1b - \vector_start)]
+       msr     vbar_el1, x30
+-      add     x30, x30, #(1b - tramp_vectors)
+       isb
++      .else
++      ldr     x30, =vectors
++      .endif // \kpti == 1
++
++      .if     \bhb == BHB_MITIGATION_FW
++      /*
++       * The firmware sequence must appear before the first indirect branch.
++       * i.e. the ret out of tramp_ventry. But it also needs the stack to be
++       * mapped to save/restore the registers the SMC clobbers.
++       */
++      __mitigate_spectre_bhb_fw
++      .endif // \bhb == BHB_MITIGATION_FW
++
++      add     x30, x30, #(1b - \vector_start + 4)
+       ret
++.org 1b + 128 // Did we overflow the ventry slot?
+       .endm
+ 
+       .macro tramp_exit, regsize = 64
+-      adr     x30, tramp_vectors
++      tramp_data_read_var     x30, this_cpu_vector
++alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
++      mrs     x29, tpidr_el1
++alternative_else
++      mrs     x29, tpidr_el2
++alternative_endif
++      ldr     x30, [x30, x29]
++
+       msr     vbar_el1, x30
+-      tramp_unmap_kernel      x30
++      ldr     lr, [sp, #S_LR]
++      tramp_unmap_kernel      x29
+       .if     \regsize == 64
+-      mrs     x30, far_el1
++      mrs     x29, far_el1
+       .endif
++      add     sp, sp, #S_FRAME_SIZE           // restore sp
+       eret
+       .endm
+ 
+-      .align  11
+-ENTRY(tramp_vectors)
++      .macro  generate_tramp_vector,  kpti, bhb
++.Lvector_start\@:
+       .space  0x400
+ 
+-      tramp_ventry
+-      tramp_ventry
+-      tramp_ventry
+-      tramp_ventry
++      .rept   4
++      tramp_ventry    .Lvector_start\@, 64, \kpti, \bhb
++      .endr
++      .rept   4
++      tramp_ventry    .Lvector_start\@, 32, \kpti, \bhb
++      .endr
++      .endm
+ 
+-      tramp_ventry    32
+-      tramp_ventry    32
+-      tramp_ventry    32
+-      tramp_ventry    32
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++/*
++ * Exception vectors trampoline.
++ * The order must match __bp_harden_el1_vectors and the
++ * arm64_bp_harden_el1_vectors enum.
++ */
++      .pushsection ".entry.tramp.text", "ax"
++      .align  11
++ENTRY(tramp_vectors)
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_LOOP
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_FW
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_INSN
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_NONE
+ END(tramp_vectors)
+ 
+ ENTRY(tramp_exit_native)
+@@ -1026,11 +1099,55 @@ END(tramp_exit_compat)
+       .align PAGE_SHIFT
+       .globl  __entry_tramp_data_start
+ __entry_tramp_data_start:
++__entry_tramp_data_vectors:
+       .quad   vectors
++#ifdef CONFIG_ARM_SDE_INTERFACE
++__entry_tramp_data___sdei_asm_handler:
++      .quad   __sdei_asm_handler
++#endif /* CONFIG_ARM_SDE_INTERFACE */
++__entry_tramp_data_this_cpu_vector:
++      .quad   this_cpu_vector
+       .popsection                             // .rodata
+ #endif /* CONFIG_RANDOMIZE_BASE */
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ 
++/*
++ * Exception vectors for spectre mitigations on entry from EL1 when
++ * kpti is not in use.
++ */
++      .macro generate_el1_vector, bhb
++.Lvector_start\@:
++      kernel_ventry   1, sync_invalid                 // Synchronous EL1t
++      kernel_ventry   1, irq_invalid                  // IRQ EL1t
++      kernel_ventry   1, fiq_invalid                  // FIQ EL1t
++      kernel_ventry   1, error_invalid                // Error EL1t
++
++      kernel_ventry   1, sync                         // Synchronous EL1h
++      kernel_ventry   1, irq                          // IRQ EL1h
++      kernel_ventry   1, fiq_invalid                  // FIQ EL1h
++      kernel_ventry   1, error                        // Error EL1h
++
++      .rept   4
++      tramp_ventry    .Lvector_start\@, 64, 0, \bhb
++      .endr
++      .rept 4
++      tramp_ventry    .Lvector_start\@, 32, 0, \bhb
++      .endr
++      .endm
++
++/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
++      .pushsection ".entry.text", "ax"
++      .align  11
++ENTRY(__bp_harden_el1_vectors)
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      generate_el1_vector     bhb=BHB_MITIGATION_LOOP
++      generate_el1_vector     bhb=BHB_MITIGATION_FW
++      generate_el1_vector     bhb=BHB_MITIGATION_INSN
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++END(__bp_harden_el1_vectors)
++      .popsection
++
++
+ /*
+  * Register switch for AArch64. The callee-saved registers need to be saved
+  * and restored. On entry:
+@@ -1117,13 +1234,7 @@ ENTRY(__sdei_asm_entry_trampoline)
+        */
+ 1:    str     x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ 
+-#ifdef CONFIG_RANDOMIZE_BASE
+-      adr     x4, tramp_vectors + PAGE_SIZE
+-      add     x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+-      ldr     x4, [x4]
+-#else
+-      ldr     x4, =__sdei_asm_handler
+-#endif
++      tramp_data_read_var     x4, __sdei_asm_handler
+       br      x4
+ ENDPROC(__sdei_asm_entry_trampoline)
+ NOKPROBE(__sdei_asm_entry_trampoline)
+@@ -1146,12 +1257,6 @@ ENDPROC(__sdei_asm_exit_trampoline)
+ NOKPROBE(__sdei_asm_exit_trampoline)
+       .ltorg
+ .popsection           // .entry.tramp.text
+-#ifdef CONFIG_RANDOMIZE_BASE
+-.pushsection ".rodata", "a"
+-__sdei_asm_trampoline_next_handler:
+-      .quad   __sdei_asm_handler
+-.popsection           // .rodata
+-#endif /* CONFIG_RANDOMIZE_BASE */
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ 
+ /*
+@@ -1247,7 +1352,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ alternative_else_nop_endif
+ 
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-      tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline
++      tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
+       br      x5
+ #endif
+ ENDPROC(__sdei_asm_handler)
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 69e7c8d4a00f6..370089455d38d 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -259,7 +259,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+       <= SZ_4K, "Hibernate exit text too big or misaligned")
+ #endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
++ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
+       "Entry trampoline text too big")
+ #endif
+ /*
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index ea063312bca18..01e518b82c495 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -135,6 +135,10 @@ el1_hvc_guest:
+       /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+       eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
+                         ARM_SMCCC_ARCH_WORKAROUND_2)
++      cbz     w1, wa_epilogue
++
++      eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
++                        ARM_SMCCC_ARCH_WORKAROUND_3)
+       cbnz    w1, el1_trap
+ 
+ #ifdef CONFIG_ARM64_SSBD
+@@ -334,4 +338,64 @@ ENTRY(__smccc_workaround_1_smc_start)
+       ldp     x0, x1, [sp, #(8 * 2)]
+       add     sp, sp, #(8 * 4)
+ ENTRY(__smccc_workaround_1_smc_end)
++
++ENTRY(__smccc_workaround_3_smc_start)
++      esb
++      sub     sp, sp, #(8 * 4)
++      stp     x2, x3, [sp, #(8 * 0)]
++      stp     x0, x1, [sp, #(8 * 2)]
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
++      smc     #0
++      ldp     x2, x3, [sp, #(8 * 0)]
++      ldp     x0, x1, [sp, #(8 * 2)]
++      add     sp, sp, #(8 * 4)
++ENTRY(__smccc_workaround_3_smc_end)
++
++ENTRY(__spectre_bhb_loop_k8_start)
++      esb
++      sub     sp, sp, #(8 * 2)
++      stp     x0, x1, [sp, #(8 * 0)]
++      mov     x0, #8
++2:    b       . + 4
++      subs    x0, x0, #1
++      b.ne    2b
++      dsb     nsh
++      isb
++      ldp     x0, x1, [sp, #(8 * 0)]
++      add     sp, sp, #(8 * 2)
++ENTRY(__spectre_bhb_loop_k8_end)
++
++ENTRY(__spectre_bhb_loop_k24_start)
++      esb
++      sub     sp, sp, #(8 * 2)
++      stp     x0, x1, [sp, #(8 * 0)]
++      mov     x0, #24
++2:    b       . + 4
++      subs    x0, x0, #1
++      b.ne    2b
++      dsb     nsh
++      isb
++      ldp     x0, x1, [sp, #(8 * 0)]
++      add     sp, sp, #(8 * 2)
++ENTRY(__spectre_bhb_loop_k24_end)
++
++ENTRY(__spectre_bhb_loop_k32_start)
++      esb
++      sub     sp, sp, #(8 * 2)
++      stp     x0, x1, [sp, #(8 * 0)]
++      mov     x0, #32
++2:    b       . + 4
++      subs    x0, x0, #1
++      b.ne    2b
++      dsb     nsh
++      isb
++      ldp     x0, x1, [sp, #(8 * 0)]
++      add     sp, sp, #(8 * 2)
++ENTRY(__spectre_bhb_loop_k32_end)
++
++ENTRY(__spectre_bhb_clearbhb_start)
++      esb
++      clearbhb
++      isb
++ENTRY(__spectre_bhb_clearbhb_end)
+ #endif
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 1d16ce0b7e0d6..1c248c12a49e1 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -34,6 +34,7 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/processor.h>
+ #include <asm/thread_info.h>
++#include <asm/vectors.h>
+ 
+ extern struct exception_table_entry __start___kvm_ex_table;
+ extern struct exception_table_entry __stop___kvm_ex_table;
+@@ -155,10 +156,13 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+ 
+ static void deactivate_traps_vhe(void)
+ {
+-      extern char vectors[];  /* kernel exception vectors */
++      const char *host_vectors = vectors;
+       write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+       write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
+-      write_sysreg(vectors, vbar_el1);
++
++      if (!arm64_kernel_unmapped_at_el0())
++              host_vectors = __this_cpu_read(this_cpu_vector);
++      write_sysreg(host_vectors, vbar_el1);
+ }
+ NOKPROBE_SYMBOL(deactivate_traps_vhe);
+ 
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 98e8bc9195830..f06629bf2be16 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1289,7 +1289,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+       /* CRm=6 */
+       ID_SANITISED(ID_AA64ISAR0_EL1),
+       ID_SANITISED(ID_AA64ISAR1_EL1),
+-      ID_UNALLOCATED(6,2),
++      ID_SANITISED(ID_AA64ISAR2_EL1),
+       ID_UNALLOCATED(6,3),
+       ID_UNALLOCATED(6,4),
+       ID_UNALLOCATED(6,5),
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 0fa558176fb10..b0a83dbed2dc4 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -541,6 +541,8 @@ early_param("rodata", parse_rodata);
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static int __init map_entry_trampoline(void)
+ {
++      int i;
++
+       pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+       phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+ 
+@@ -549,11 +551,15 @@ static int __init map_entry_trampoline(void)
+ 
+       /* Map only the text into the trampoline page table */
+       memset(tramp_pg_dir, 0, PGD_SIZE);
+-      __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+-                           prot, pgd_pgtable_alloc, 0);
++      __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
++                           entry_tramp_text_size(), prot, pgd_pgtable_alloc,
++                           0);
+ 
+       /* Map both the text and data into the kernel page table */
+-      __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
++      for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
++              __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
++                           pa_start + i * PAGE_SIZE, prot);
++
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               extern char __entry_tramp_data_start[];
+ 
+diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
+index 1dacbf5e9e09a..0c1dac64e77b8 100644
+--- a/arch/ia64/kernel/acpi.c
++++ b/arch/ia64/kernel/acpi.c
+@@ -537,7 +537,8 @@ void __init acpi_numa_fixup(void)
+       if (srat_num_cpus == 0) {
+               node_set_online(0);
+               node_cpuid[0].phys_id = hard_smp_processor_id();
+-              return;
++              node_distance(0, 0) = LOCAL_DISTANCE;
++              goto out;
+       }
+ 
+       /*
+@@ -580,7 +581,7 @@ void __init acpi_numa_fixup(void)
+                       for (j = 0; j < MAX_NUMNODES; j++)
+                               node_distance(i, j) = i == j ? LOCAL_DISTANCE :
+                                                       REMOTE_DISTANCE;
+-              return;
++              goto out;
+       }
+ 
+       memset(numa_slit, -1, sizeof(numa_slit));
+@@ -605,6 +606,8 @@ void __init acpi_numa_fixup(void)
+               printk("\n");
+       }
+ #endif
++out:
++      node_possible_map = node_online_map;
+ }
+ #endif                                /* CONFIG_ACPI_NUMA */
+ 
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index d84b9066b4654..7206a6977be9b 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -372,6 +372,9 @@ asmlinkage void start_secondary(void)
+       cpu = smp_processor_id();
+       cpu_data[cpu].udelay_val = loops_per_jiffy;
+ 
++      set_cpu_sibling_map(cpu);
++      set_cpu_core_map(cpu);
++
+       cpumask_set_cpu(cpu, &cpu_coherent_mask);
+       notify_cpu_starting(cpu);
+ 
+@@ -383,9 +386,6 @@ asmlinkage void start_secondary(void)
+       /* The CPU is running and counters synchronised, now mark it online */
+       set_cpu_online(cpu, true);
+ 
+-      set_cpu_sibling_map(cpu);
+-      set_cpu_core_map(cpu);
+-
+       calculate_cpu_foreign_map();
+ 
+       /*
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 1409d48affb70..f256aeeac1b3f 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -1114,6 +1114,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
+       }
+       paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
+                              DMA_TO_DEVICE);
++      if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
++              return enq_next;
+       ENI_PRV_PADDR(skb) = paddr;
+       /* prepare DMA queue entries */
+       j = 0;
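
The eni.c hunk above adds the check the DMA API requires: a handle returned
by dma_map_single() must be validated with dma_mapping_error() before use.
A minimal sketch of the pattern, in kernel style (the helper name and error
handling are illustrative, not part of the patch):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Map an skb payload for transmit; caller owns the unmap on success. */
    static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *paddr)
    {
            *paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);

            /* On failure nothing was mapped, so there is nothing to unmap. */
            if (dma_mapping_error(dev, *paddr))
                    return -ENOMEM;

            return 0;
    }
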
+diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
+index ef395b238816c..f7880012b9a01 100644
+--- a/drivers/atm/firestream.c
++++ b/drivers/atm/firestream.c
+@@ -1692,6 +1692,8 @@ static int fs_init(struct fs_dev *dev)
+       dev->hw_base = pci_resource_start(pci_dev, 0);
+ 
+       dev->base = ioremap(dev->hw_base, 0x1000);
++      if (!dev->base)
++              return 1;
+ 
+       reset_chip (dev);
+   
+diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
+index e54249ccc0092..e41ca58bf7b6d 100644
+--- a/drivers/crypto/qcom-rng.c
++++ b/drivers/crypto/qcom-rng.c
+@@ -7,6 +7,7 @@
+ #include <linux/acpi.h>
+ #include <linux/clk.h>
+ #include <linux/crypto.h>
++#include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+@@ -42,16 +43,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+ {
+       unsigned int currsize = 0;
+       u32 val;
++      int ret;
+ 
+       /* read random data from hardware */
+       do {
+-              val = readl_relaxed(rng->base + PRNG_STATUS);
+-              if (!(val & PRNG_STATUS_DATA_AVAIL))
+-                      break;
++              ret = readl_poll_timeout(rng->base + PRNG_STATUS, val,
++                                       val & PRNG_STATUS_DATA_AVAIL,
++                                       200, 10000);
++              if (ret)
++                      return ret;
+ 
+               val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+               if (!val)
+-                      break;
++                      return -EINVAL;
+ 
+               if ((max - currsize) >= WORD_SZ) {
+                       memcpy(data, &val, WORD_SZ);
+@@ -60,11 +64,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+               } else {
+                       /* copy only remaining bytes */
+                       memcpy(data, &val, max - currsize);
+-                      break;
+               }
+       } while (currsize < max);
+ 
+-      return currsize;
++      return 0;
+ }
+ 
+ static int qcom_rng_generate(struct crypto_rng *tfm,
+@@ -86,7 +89,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm,
+       mutex_unlock(&rng->lock);
+       clk_disable_unprepare(rng->clk);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
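
For context on the qcom-rng hunks: readl_poll_timeout(addr, val, cond,
sleep_us, timeout_us) from <linux/iopoll.h> re-reads the register until cond
is true or timeout_us elapses, returning 0 on success and -ETIMEDOUT
otherwise, with val holding the last value read. A rough userspace emulation
of that contract (the fake register and names are illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint32_t fake_status;            /* stands in for the MMIO register */
    static uint32_t read_reg(void) { return fake_status; }

    static uint64_t now_us(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    /* Poll until (val & mask) is set or timeout_us elapses. */
    static int poll_status(uint32_t mask, uint32_t *val, uint64_t timeout_us)
    {
            uint64_t deadline = now_us() + timeout_us;

            for (;;) {
                    *val = read_reg();
                    if (*val & mask)
                            return 0;
                    if (now_us() > deadline)
                            return -ETIMEDOUT;
            }
    }

    int main(void)
    {
            uint32_t val;

            /* The bit never becomes ready here, so this times out. */
            printf("%d\n", poll_status(0x1, &val, 10000));
            return 0;
    }
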
+diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
+index 60a95719ecb86..726a23d45da4c 100644
+--- a/drivers/firmware/efi/apple-properties.c
++++ b/drivers/firmware/efi/apple-properties.c
+@@ -34,7 +34,7 @@ static bool dump_properties __initdata;
+ static int __init dump_properties_enable(char *arg)
+ {
+       dump_properties = true;
+-      return 0;
++      return 1;
+ }
+ 
+ __setup("dump_apple_properties", dump_properties_enable);
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index a8180f9090fae..7098744f9276a 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -245,7 +245,7 @@ static int __init efivar_ssdt_setup(char *str)
+               memcpy(efivar_ssdt, str, strlen(str));
+       else
+               pr_warn("efivar_ssdt: name too long: %s\n", str);
+-      return 0;
++      return 1;
+ }
+ __setup("efivar_ssdt=", efivar_ssdt_setup);
+ 
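
Both __setup() fixes above apply the same convention: an early-param handler
returns 1 to mark the option as consumed, while returning 0 means "not mine"
and lets the string fall through to init's argument or environment list.
Sketched with a made-up option name:

    static bool my_feature __initdata;

    static int __init my_feature_enable(char *arg)
    {
            my_feature = true;
            return 1;       /* consumed; don't pass "my_feature" on to init */
    }
    __setup("my_feature", my_feature_enable);
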
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 8814aa38c5e7b..a424afdcc77a1 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1244,7 +1244,7 @@ static const struct display_timing innolux_g070y2_l01_timing = {
+ static const struct panel_desc innolux_g070y2_l01 = {
+       .timings = &innolux_g070y2_l01_timing,
+       .num_timings = 1,
+-      .bpc = 6,
++      .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 91,
+diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
+index dc2ad1cc8fe1d..8f418d984a2d6 100644
+--- a/drivers/input/tablet/aiptek.c
++++ b/drivers/input/tablet/aiptek.c
+@@ -1814,15 +1814,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
+       input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
+       input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
+ 
+-      /* Verify that a device really has an endpoint */
+-      if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
++      err = usb_find_common_endpoints(intf->cur_altsetting,
++                                      NULL, NULL, &endpoint, NULL);
++      if (err) {
+               dev_err(&intf->dev,
+-                      "interface has %d endpoints, but must have minimum 1\n",
+-                      intf->cur_altsetting->desc.bNumEndpoints);
+-              err = -EINVAL;
++                      "interface has no int in endpoints, but must have minimum 1\n");
+               goto fail3;
+       }
+-      endpoint = &intf->cur_altsetting->endpoint[0].desc;
+ 
+       /* Go set up our URB, which is called when the tablet receives
+        * input.
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index 786d852a70d58..a1634834b640e 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1602,15 +1602,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
+ 
+       netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
+                      RCANFD_NAPI_WEIGHT);
++      spin_lock_init(&priv->tx_lock);
++      devm_can_led_init(ndev);
++      gpriv->ch[priv->channel] = priv;
+       err = register_candev(ndev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "register_candev() failed, error %d\n", err);
+               goto fail_candev;
+       }
+-      spin_lock_init(&priv->tx_lock);
+-      devm_can_led_init(ndev);
+-      gpriv->ch[priv->channel] = priv;
+       dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+       return 0;
+ 
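
The rcar_canfd reordering follows the general registration rule:
register_candev() makes the interface visible, so every field its handlers
might touch (the TX lock, the channel pointer) has to be initialised first.
The shape of the pattern, with a hypothetical private struct:

    #include <linux/can/dev.h>
    #include <linux/spinlock.h>

    struct chan_priv {
            spinlock_t tx_lock;
            /* ... */
    };

    static int chan_register(struct net_device *ndev, struct chan_priv *priv)
    {
            /* Anything reachable from xmit or IRQ context must be
             * ready before the device goes live. */
            spin_lock_init(&priv->tx_lock);

            return register_candev(ndev);
    }
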
+diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
+index dfad93fca0a61..0fa64b8b79bf2 100644
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -166,9 +166,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+       /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+       spin_lock_bh(&mcdi->iface_lock);
+       ++mcdi->seqno;
++      seqno = mcdi->seqno & SEQ_MASK;
+       spin_unlock_bh(&mcdi->iface_lock);
+ 
+-      seqno = mcdi->seqno & SEQ_MASK;
+       xflags = 0;
+       if (mcdi->mode == MCDI_MODE_EVENTS)
+               xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 2dff0e110c6f3..f094e4bc21751 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1454,6 +1454,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
+       pcpu_sum = kvmalloc_array(num_possible_cpus(),
+                                 sizeof(struct netvsc_ethtool_pcpu_stats),
+                                 GFP_KERNEL);
++      if (!pcpu_sum)
++              return;
++
+       netvsc_get_pcpu_stats(dev, pcpu_sum);
+       for_each_present_cpu(cpu) {
+               struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
+diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
+index 970ed1514f0bc..fa0c173a0d26f 100644
+--- a/drivers/usb/gadget/function/rndis.c
++++ b/drivers/usb/gadget/function/rndis.c
+@@ -640,6 +640,7 @@ static int rndis_set_response(struct rndis_params *params,
+       BufLength = le32_to_cpu(buf->InformationBufferLength);
+       BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+       if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
++          (BufOffset > RNDIS_MAX_TOTAL_SIZE) ||
+           (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+                   return -EINVAL;
+ 
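
The extra "BufOffset > RNDIS_MAX_TOTAL_SIZE" test matters because BufOffset
is a u32 taken from the wire: with only the old
"BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE" check, an offset near UINT32_MAX
wraps the addition and slips past. A runnable demonstration of the
wraparound (the limit value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TOTAL_SIZE 1558u    /* illustrative limit */

    int main(void)
    {
            uint32_t off = UINT32_MAX - 3;  /* hostile offset */

            /* Old check alone: off + 8 wraps to 4, so nothing is rejected. */
            printf("old check rejects: %d\n", off + 8 >= MAX_TOTAL_SIZE);

            /* The added range test rejects the offset before the addition. */
            printf("new check rejects: %d\n",
                   off > MAX_TOTAL_SIZE || off + 8 >= MAX_TOTAL_SIZE);
            return 0;
    }
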
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 87a417d878b8a..e3dc74cdba838 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1297,7 +1297,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
+       usb_gadget_udc_stop(udc);
+ 
+       udc->driver = NULL;
+-      udc->dev.driver = NULL;
+       udc->gadget->dev.driver = NULL;
+ }
+ 
+@@ -1346,7 +1345,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
+                       driver->function);
+ 
+       udc->driver = driver;
+-      udc->dev.driver = &driver->driver;
+       udc->gadget->dev.driver = &driver->driver;
+ 
+       usb_gadget_udc_set_speed(udc, driver->max_speed);
+@@ -1368,7 +1366,6 @@ err1:
+               dev_err(&udc->dev, "failed to start %s: %d\n",
+                       udc->driver->function, ret);
+       udc->driver = NULL;
+-      udc->dev.driver = NULL;
+       udc->gadget->dev.driver = NULL;
+       return ret;
+ }
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index b5aa60430a6a7..7a08053d95c21 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1150,17 +1150,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+               goto read_super_error;
+       }
+ 
+-      root = d_make_root(inode);
+-      if (!root) {
+-              status = -ENOMEM;
+-              mlog_errno(status);
+-              goto read_super_error;
+-      }
+-
+-      sb->s_root = root;
+-
+-      ocfs2_complete_mount_recovery(osb);
+-
+       osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
+                                               &ocfs2_kset->kobj);
+       if (!osb->osb_dev_kset) {
+@@ -1178,6 +1167,17 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+               goto read_super_error;
+       }
+ 
++      root = d_make_root(inode);
++      if (!root) {
++              status = -ENOMEM;
++              mlog_errno(status);
++              goto read_super_error;
++      }
++
++      sb->s_root = root;
++
++      ocfs2_complete_mount_recovery(osb);
++
+       if (ocfs2_mount_local(osb))
+               snprintf(nodestr, sizeof(nodestr), "local");
+       else
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index 5166eb40917da..011e391497f4e 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -572,8 +572,7 @@ int sysfs_emit(char *buf, const char *fmt, ...)
+       va_list args;
+       int len;
+ 
+-      if (WARN(!buf || offset_in_page(buf),
+-               "invalid sysfs_emit: buf:%p\n", buf))
++      if (WARN(!buf, "invalid sysfs_emit: buf:%p\n", buf))
+               return 0;
+ 
+       va_start(args, fmt);
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index 6366b04c7d5f4..0402668914147 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -85,6 +85,13 @@
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 0x7fff)
+ 
++#define ARM_SMCCC_ARCH_WORKAROUND_3                                   \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0x3fff)
++
++#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED  1
++
+ #ifndef __ASSEMBLY__
+ 
+ #include <linux/linkage.h>
+diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
+index e44746de95cdf..c697a05242735 100644
+--- a/include/linux/if_arp.h
++++ b/include/linux/if_arp.h
+@@ -55,6 +55,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
+       case ARPHRD_VOID:
+       case ARPHRD_NONE:
+       case ARPHRD_RAWIP:
++      case ARPHRD_PIMREG:
+               return false;
+       default:
+               return true;
+diff --git a/include/linux/topology.h b/include/linux/topology.h
+index cb0775e1ee4bd..707364c90aa64 100644
+--- a/include/linux/topology.h
++++ b/include/linux/topology.h
+@@ -47,6 +47,7 @@ int arch_update_cpu_topology(void);
+ /* Conform to ACPI 2.0 SLIT distance definitions */
+ #define LOCAL_DISTANCE                10
+ #define REMOTE_DISTANCE               20
++#define DISTANCE_BITS           8
+ #ifndef node_distance
+#define node_distance(from,to)        ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
+ #endif
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index fe8bed557691a..a8aa2bb74ad61 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1763,14 +1763,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+              const struct xfrm_migrate *m, int num_bundles,
+              const struct xfrm_kmaddress *k,
+              const struct xfrm_encap_tmpl *encap);
+-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
++struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
++                                              u32 if_id);
+ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+                                     struct xfrm_migrate *m,
+                                     struct xfrm_encap_tmpl *encap);
+ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                struct xfrm_migrate *m, int num_bundles,
+                struct xfrm_kmaddress *k, struct net *net,
+-               struct xfrm_encap_tmpl *encap);
++               struct xfrm_encap_tmpl *encap, u32 if_id);
+ #endif
+ 
+ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index d43d25acc95ad..153628277a487 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1528,9 +1528,13 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+       cgroup_taskset_first(tset, &css);
+       cs = css_cs(css);
+ 
+-      cpus_read_lock();
+       mutex_lock(&cpuset_mutex);
+ 
++      /*
++       * It should hold cpus lock because a cpu offline event can
++       * cause set_cpus_allowed_ptr() failed.
++       */
++      get_online_cpus();
+       /* prepare for attach */
+       if (cs == &top_cpuset)
+               cpumask_copy(cpus_attach, cpu_possible_mask);
+@@ -1549,6 +1553,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+               cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+               cpuset_update_task_spread_flag(cs, task);
+       }
++       put_online_cpus();
+ 
+       /*
+        * Change mm for all threadgroup leaders. This is expensive and may
+@@ -1584,7 +1589,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+               wake_up(&cpuset_attach_wq);
+ 
+       mutex_unlock(&cpuset_mutex);
+-      cpus_read_unlock();
+ }
+ 
+ /* The various types of files and directories in a cpuset file system */
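
The cpuset change pins CPU hotplug state across the whole attach loop, since
a concurrent offline can make set_cpus_allowed_ptr() fail midway; this 4.19
backport takes the lock via the older get_online_cpus()/put_online_cpus()
names, inside cpuset_mutex. A minimal sketch of the locking shape (the
helper is hypothetical):

    static void pin_tasks_to_mask(struct cpumask *mask)
    {
            get_online_cpus();      /* no CPU may go offline in here */

            /* cpu_online_mask is stable now, so a mask derived from it
             * remains valid for each set_cpus_allowed_ptr() call. */
            /* ... iterate tasks and call set_cpus_allowed_ptr() ... */

            put_online_cpus();
    }
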
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index f58efa5cc6474..02e85cd233d42 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -1322,66 +1322,58 @@ static void init_numa_topology_type(void)
+       }
+ }
+ 
++
++#define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
++
+ void sched_init_numa(void)
+ {
+-      int next_distance, curr_distance = node_distance(0, 0);
+       struct sched_domain_topology_level *tl;
+-      int level = 0;
+-      int i, j, k;
+-
+-      sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
+-      if (!sched_domains_numa_distance)
+-              return;
+-
+-      /* Includes NUMA identity node at level 0. */
+-      sched_domains_numa_distance[level++] = curr_distance;
+-      sched_domains_numa_levels = level;
++      unsigned long *distance_map;
++      int nr_levels = 0;
++      int i, j;
+ 
+       /*
+        * O(nr_nodes^2) deduplicating selection sort -- in order to find the
+        * unique distances in the node_distance() table.
+-       *
+-       * Assumes node_distance(0,j) includes all distances in
+-       * node_distance(i,j) in order to avoid cubic time.
+        */
+-      next_distance = curr_distance;
++      distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
++      if (!distance_map)
++              return;
++
++      bitmap_zero(distance_map, NR_DISTANCE_VALUES);
+       for (i = 0; i < nr_node_ids; i++) {
+               for (j = 0; j < nr_node_ids; j++) {
+-                      for (k = 0; k < nr_node_ids; k++) {
+-                              int distance = node_distance(i, k);
+-
+-                              if (distance > curr_distance &&
+-                                  (distance < next_distance ||
+-                                   next_distance == curr_distance))
+-                                      next_distance = distance;
+-
+-                              /*
+-                               * While not a strong assumption it would be nice to know
+-                               * about cases where if node A is connected to B, B is not
+-                               * equally connected to A.
+-                               */
+-                              if (sched_debug() && node_distance(k, i) != distance)
+-                                      sched_numa_warn("Node-distance not symmetric");
++                      int distance = node_distance(i, j);
+ 
+-                              if (sched_debug() && i && !find_numa_distance(distance))
+-                                      sched_numa_warn("Node-0 not representative");
++                      if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
++                              sched_numa_warn("Invalid distance value range");
++                              return;
+                       }
+-                      if (next_distance != curr_distance) {
+-                              sched_domains_numa_distance[level++] = next_distance;
+-                              sched_domains_numa_levels = level;
+-                              curr_distance = next_distance;
+-                      } else break;
++
++                      bitmap_set(distance_map, distance, 1);
+               }
++      }
++      /*
++       * We can now figure out how many unique distance values there are and
++       * allocate memory accordingly.
++       */
++      nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
+ 
+-              /*
+-               * In case of sched_debug() we verify the above assumption.
+-               */
+-              if (!sched_debug())
+-                      break;
++      sched_domains_numa_distance = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
++      if (!sched_domains_numa_distance) {
++              bitmap_free(distance_map);
++              return;
+       }
+ 
++      for (i = 0, j = 0; i < nr_levels; i++, j++) {
++              j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
++              sched_domains_numa_distance[i] = j;
++      }
++
++      bitmap_free(distance_map);
++
+       /*
+-       * 'level' contains the number of unique distances
++       * 'nr_levels' contains the number of unique distances
+        *
+        * The sched_domains_numa_distance[] array includes the actual distance
+        * numbers.
+@@ -1390,15 +1382,15 @@ void sched_init_numa(void)
+       /*
+        * Here, we should temporarily reset sched_domains_numa_levels to 0.
+        * If it fails to allocate memory for array sched_domains_numa_masks[][],
+-       * the array will contain less then 'level' members. This could be
++       * the array will contain less then 'nr_levels' members. This could be
+        * dangerous when we use it to iterate array sched_domains_numa_masks[][]
+        * in other functions.
+        *
+-       * We reset it to 'level' at the end of this function.
++       * We reset it to 'nr_levels' at the end of this function.
+        */
+       sched_domains_numa_levels = 0;
+ 
+-      sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
++      sched_domains_numa_masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
+       if (!sched_domains_numa_masks)
+               return;
+ 
+@@ -1406,7 +1398,7 @@ void sched_init_numa(void)
+        * Now for each level, construct a mask per node which contains all
+        * CPUs of nodes that are that many hops away from us.
+        */
+-      for (i = 0; i < level; i++) {
++      for (i = 0; i < nr_levels; i++) {
+               sched_domains_numa_masks[i] =
+                       kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
+               if (!sched_domains_numa_masks[i])
+@@ -1414,12 +1406,17 @@ void sched_init_numa(void)
+ 
+               for (j = 0; j < nr_node_ids; j++) {
+                       struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
++                      int k;
++
+                       if (!mask)
+                               return;
+ 
+                       sched_domains_numa_masks[i][j] = mask;
+ 
+                       for_each_node(k) {
++                              if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
++                                      sched_numa_warn("Node-distance not symmetric");
++
+                               if (node_distance(j, k) > sched_domains_numa_distance[i])
+                                       continue;
+ 
+@@ -1431,7 +1428,7 @@ void sched_init_numa(void)
+       /* Compute default topology size */
+       for (i = 0; sched_domain_topology[i].mask; i++);
+ 
+-      tl = kzalloc((i + level + 1) *
++      tl = kzalloc((i + nr_levels + 1) *
+                       sizeof(struct sched_domain_topology_level), GFP_KERNEL);
+       if (!tl)
+               return;
+@@ -1454,7 +1451,7 @@ void sched_init_numa(void)
+       /*
+        * .. and append 'j' levels of NUMA goodness.
+        */
+-      for (j = 1; j < level; i++, j++) {
++      for (j = 1; j < nr_levels; i++, j++) {
+               tl[i] = (struct sched_domain_topology_level){
+                       .mask = sd_numa_mask,
+                       .sd_flags = cpu_numa_flags,
+@@ -1466,8 +1463,8 @@ void sched_init_numa(void)
+ 
+       sched_domain_topology = tl;
+ 
+-      sched_domains_numa_levels = level;
+-      sched_max_numa_distance = sched_domains_numa_distance[level - 1];
++      sched_domains_numa_levels = nr_levels;
++      sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
+ 
+       init_numa_topology_type();
+ }
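
The rewritten sched_init_numa() swaps the old deduplicating selection sort
for a bitmap: each observed distance sets a bit, bitmap_weight() counts the
unique levels, and walking set bits with find_next_bit() yields them already
sorted. The same idea in plain userspace C, with a made-up 3-node distance
table:

    #include <stdio.h>

    #define DISTANCE_BITS 8
    #define NR_VALUES (1 << DISTANCE_BITS)

    int main(void)
    {
            int dist[3][3] = { {10, 20, 30}, {20, 10, 20}, {30, 20, 10} };
            unsigned char seen[NR_VALUES] = { 0 };  /* poor man's bitmap */
            int i, j, levels = 0;

            /* Mark every distance value that occurs in the table. */
            for (i = 0; i < 3; i++)
                    for (j = 0; j < 3; j++)
                            seen[dist[i][j]] = 1;

            /* Walking in index order yields unique distances ascending,
             * exactly what find_next_bit() gives the kernel version. */
            for (i = 0; i < NR_VALUES; i++)
                    if (seen[i])
                            printf("level %d: distance %d\n", levels++, i);
            return 0;
    }
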
+diff --git a/lib/Kconfig b/lib/Kconfig
+index a3928d4438b50..714ec2f50bb10 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -16,7 +16,6 @@ config BITREVERSE
+ config HAVE_ARCH_BITREVERSE
+       bool
+       default n
+-      depends on BITREVERSE
+       help
+         This option enables the use of hardware bit-reversal instructions on
+         architectures which support such operations.
+diff --git a/mm/migrate.c b/mm/migrate.c
+index a69b842f95daf..76f8dedc0e02b 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -472,6 +472,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 
+       pslot = radix_tree_lookup_slot(&mapping->i_pages,
+                                       page_index(page));
++      if (pslot == NULL) {
++              xa_unlock_irq(&mapping->i_pages);
++              return -EAGAIN;
++      }
+ 
+       expected_count += hpage_nr_pages(page) + page_has_private(page);
+       if (page_count(page) != expected_count ||
+@@ -590,6 +594,10 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
+       xa_lock_irq(&mapping->i_pages);
+ 
+       pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));
++      if (pslot == NULL) {
++              xa_unlock_irq(&mapping->i_pages);
++              return -EAGAIN;
++      }
+ 
+       expected_count = 2 + page_has_private(page);
+       if (page_count(page) != expected_count ||
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 7c10bc4dacd31..05aadb25e2949 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -596,6 +596,7 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
+               struct net_device *master;
+ 
+               master = of_find_net_device_by_node(ethernet);
++              of_node_put(ethernet);
+               if (!master)
+                       return -EPROBE_DEFER;
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 4dce1b418acc2..f7795488b0adf 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1669,11 +1669,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+                               if (!copied)
+                                       copied = used;
+                               break;
+-                      } else if (used <= len) {
+-                              seq += used;
+-                              copied += used;
+-                              offset += used;
+                       }
++                      if (WARN_ON_ONCE(used > len))
++                              used = len;
++                      seq += used;
++                      copied += used;
++                      offset += used;
++
+                       /* If recv_actor drops the lock (e.g. TCP splice
+                        * receive) the skb pointer might be invalid when
+                        * getting here: tcp_collapse might have deleted it
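
The tcp_read_sock() hunk stops trusting the recv_actor callback to honour
its contract of returning at most len: the value is clamped (with a one-time
warning) before seq, copied and offset advance, so a buggy actor can no
longer push the sequence past the data actually consumed. The
clamp-then-advance shape, reduced to a standalone C demo:

    #include <stdio.h>

    /* A recv_actor-style callback is supposed to return <= len. */
    static int bad_actor(int len) { return len + 100; }

    int main(void)
    {
            int len = 512, seq = 0, copied = 0;
            int used = bad_actor(len);

            if (used > len) {       /* WARN_ON_ONCE() in the kernel */
                    fprintf(stderr, "actor overshot, clamping\n");
                    used = len;
            }
            seq += used;
            copied += used;
            printf("seq=%d copied=%d\n", seq, copied);
            return 0;
    }
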
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index c7d5a6015389b..388910cf09781 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2633,7 +2633,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
+       }
+ 
+       return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i,
+-                          kma ? &k : NULL, net, NULL);
++                          kma ? &k : NULL, net, NULL, 0);
+ 
+  out:
+       return err;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index bd7e8d406c71e..d65051959f852 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2246,8 +2246,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+                                       copy_skb = skb_get(skb);
+                                       skb_head = skb->data;
+                               }
+-                              if (copy_skb)
++                              if (copy_skb) {
++                                      memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
++                                             sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
+                                       skb_set_owner_r(copy_skb, sk);
++                              }
+                       }
+                       snaplen = po->rx_ring.frame_size - macoff;
+                       if ((int)snaplen < 0) {
+@@ -3406,6 +3409,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+       sock_recv_ts_and_drops(msg, sk, skb);
+ 
+       if (msg->msg_name) {
++              const size_t max_len = min(sizeof(skb->cb),
++                                         sizeof(struct sockaddr_storage));
+               int copy_len;
+ 
+               /* If the address length field is there to be filled
+@@ -3428,6 +3433,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                               msg->msg_namelen = sizeof(struct sockaddr_ll);
+                       }
+               }
++              if (WARN_ON_ONCE(copy_len > max_len)) {
++                      copy_len = max_len;
++                      msg->msg_namelen = copy_len;
++              }
+               memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
+       }
+ 
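
Both af_packet hunks harden the address returned to userspace: the ring path
zeroes the sockaddr scratch space in skb->cb before anything reads it, and
recvmsg() bounds copy_len by both sizeof(skb->cb) and sizeof(struct
sockaddr_storage) so the memcpy() cannot overrun msg_name. The clamp is the
usual min-of-limits idiom, shown standalone:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char src[48] = "address bytes";   /* stands in for skb->cb */
            char dst[128];                    /* sockaddr_storage-sized */
            size_t copy_len = 200;            /* bogus length from earlier logic */
            size_t max_len = sizeof(src) < sizeof(dst) ?
                             sizeof(src) : sizeof(dst);

            if (copy_len > max_len)           /* WARN_ON_ONCE() upstream */
                    copy_len = max_len;

            memcpy(dst, src, copy_len);       /* now provably in bounds */
            printf("copied %zu bytes\n", copy_len);
            return 0;
    }
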
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index ebca069064dfd..3d52431dea9bf 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -164,6 +164,12 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
+                                       void *arg,
+                                       struct sctp_cmd_seq *commands);
+ 
++static enum sctp_disposition
++__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
++                         const struct sctp_association *asoc,
++                         const union sctp_subtype type, void *arg,
++                         struct sctp_cmd_seq *commands);
++
+ /* Small helper function that checks if the chunk length
+  * is of the appropriate length.  The 'required_length' argument
+  * is set to be the size of a specific chunk we are testing.
+@@ -345,6 +351,14 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
+       if (!chunk->singleton)
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
++      /* Make sure that the INIT chunk has a valid length.
++       * Normally, this would cause an ABORT with a Protocol Violation
++       * error, but since we don't have an association, we'll
++       * just discard the packet.
++       */
++      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+       /* If the packet is an OOTB packet which is temporarily on the
+        * control endpoint, respond with an ABORT.
+        */
+@@ -359,14 +373,6 @@ enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net,
+       if (chunk->sctp_hdr->vtag != 0)
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+ 
+-      /* Make sure that the INIT chunk has a valid length.
+-       * Normally, this would cause an ABORT with a Protocol Violation
+-       * error, but since we don't have an association, we'll
+-       * just discard the packet.
+-       */
+-      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+-              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-
+       /* If the INIT is coming toward a closing socket, we'll send back
+        * and ABORT.  Essentially, this catches the race of INIT being
+        * backloged to the socket at the same time as the user isses close().
+@@ -1499,19 +1505,16 @@ static enum sctp_disposition sctp_sf_do_unexpected_init(
+       if (!chunk->singleton)
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
++      /* Make sure that the INIT chunk has a valid length. */
++      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+       /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
+        * Tag.
+        */
+       if (chunk->sctp_hdr->vtag != 0)
+               return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+ 
+-      /* Make sure that the INIT chunk has a valid length.
+-       * In this case, we generate a protocol violation since we have
+-       * an association established.
+-       */
+-      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
+-              return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+-                                                commands);
+       /* Grab the INIT header.  */
+       chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;
+ 
+@@ -1829,9 +1832,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
+        * its peer.
+       */
+       if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
+-              disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
+-                              SCTP_ST_CHUNK(chunk->chunk_hdr->type),
+-                              chunk, commands);
++              disposition = __sctp_sf_do_9_2_reshutack(net, ep, asoc,
++                                                       SCTP_ST_CHUNK(chunk->chunk_hdr->type),
++                                                       chunk, commands);
+               if (SCTP_DISPOSITION_NOMEM == disposition)
+                       goto nomem;
+ 
+@@ -2301,7 +2304,7 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
+        */
+       if (SCTP_ADDR_DEL ==
+                   sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+-              return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
+       if (!sctp_err_chunk_valid(chunk))
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+@@ -2347,7 +2350,7 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
+        */
+       if (SCTP_ADDR_DEL ==
+                   sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+-              return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
+       if (!sctp_err_chunk_valid(chunk))
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+@@ -2617,7 +2620,7 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
+        */
+       if (SCTP_ADDR_DEL ==
+                   sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+-              return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
+       if (!sctp_err_chunk_valid(chunk))
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+@@ -2930,13 +2933,11 @@ enum sctp_disposition sctp_sf_do_9_2_shut_ctsn(
+  * that belong to this association, it should discard the INIT chunk and
+  * retransmit the SHUTDOWN ACK chunk.
+  */
+-enum sctp_disposition sctp_sf_do_9_2_reshutack(
+-                                      struct net *net,
+-                                      const struct sctp_endpoint *ep,
+-                                      const struct sctp_association *asoc,
+-                                      const union sctp_subtype type,
+-                                      void *arg,
+-                                      struct sctp_cmd_seq *commands)
++static enum sctp_disposition
++__sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
++                         const struct sctp_association *asoc,
++                         const union sctp_subtype type, void *arg,
++                         struct sctp_cmd_seq *commands)
+ {
+       struct sctp_chunk *chunk = arg;
+       struct sctp_chunk *reply;
+@@ -2970,6 +2971,26 @@ nomem:
+       return SCTP_DISPOSITION_NOMEM;
+ }
+ 
++enum sctp_disposition
++sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep,
++                       const struct sctp_association *asoc,
++                       const union sctp_subtype type, void *arg,
++                       struct sctp_cmd_seq *commands)
++{
++      struct sctp_chunk *chunk = arg;
++
++      if (!chunk->singleton)
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
++      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_init_chunk)))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
++      if (chunk->sctp_hdr->vtag != 0)
++              return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
++
++      return __sctp_sf_do_9_2_reshutack(net, ep, asoc, type, arg, commands);
++}
++
+ /*
+  * sctp_sf_do_ecn_cwr
+  *
+@@ -3766,6 +3787,11 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+       }
+ 
++      /* Make sure that the ASCONF ADDIP chunk has a valid length.  */
++      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
++              return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++                                                commands);
++
+       /* ADD-IP: Section 4.1.1
+        * This chunk MUST be sent in an authenticated way by using
+        * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
+@@ -3773,13 +3799,7 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
+        * described in [I-D.ietf-tsvwg-sctp-auth].
+        */
+       if (!net->sctp.addip_noauth && !chunk->auth)
+-              return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
+-                                           commands);
+-
+-      /* Make sure that the ASCONF ADDIP chunk has a valid length.  */
+-      if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk)))
+-              return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+-                                                commands);
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
+       hdr = (struct sctp_addiphdr *)chunk->skb->data;
+       serial = ntohl(hdr->serial);
+@@ -3908,6 +3928,12 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+       }
+ 
++      /* Make sure that the ADDIP chunk has a valid length.  */
++      if (!sctp_chunk_length_valid(asconf_ack,
++                                   sizeof(struct sctp_addip_chunk)))
++              return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
++                                                commands);
++
+       /* ADD-IP, Section 4.1.2:
+        * This chunk MUST be sent in an authenticated way by using
+        * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
+@@ -3915,14 +3941,7 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
+        * described in [I-D.ietf-tsvwg-sctp-auth].
+        */
+       if (!net->sctp.addip_noauth && !asconf_ack->auth)
+-              return sctp_sf_discard_chunk(net, ep, asoc, type, arg,
+-                                           commands);
+-
+-      /* Make sure that the ADDIP chunk has a valid length.  */
+-      if (!sctp_chunk_length_valid(asconf_ack,
+-                                   sizeof(struct sctp_addip_chunk)))
+-              return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+-                                                commands);
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
+       addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data;
+       rcvd_serial = ntohl(addip_hdr->serial);
+@@ -4494,6 +4513,9 @@ enum sctp_disposition sctp_sf_discard_chunk(struct net *net,
+ {
+       struct sctp_chunk *chunk = arg;
+ 
++      if (asoc && !sctp_vtag_verify(chunk, asoc))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+       /* Make sure that the chunk has a valid length.
+        * Since we don't know the chunk type, we use a general
+        * chunkhdr structure to make a comparison.
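
The common thread through these sctp_sf_* hunks is ordering: a chunk's
length is proven valid before any header field (vtag, serial) is
dereferenced, and chunks that fail the association's vtag check are now
discarded outright instead of reaching handlers that assume a sane header.
Sketched generically (the struct layout is illustrative only):

    #include <stdint.h>
    #include <stddef.h>

    struct chunk_hdr {
            uint8_t  type;
            uint8_t  flags;
            uint16_t length;
    };

    /* Only look inside the header after proving it is all there. */
    static int handle_chunk(const void *buf, size_t buflen)
    {
            const struct chunk_hdr *h;

            if (buflen < sizeof(*h))
                    return -1;      /* discard, as sctp_sf_pdiscard() does */

            h = buf;
            return h->type;         /* safe: header fully present */
    }

    int main(void)
    {
            uint8_t pkt[4] = { 1, 0, 0, 4 };
            return handle_chunk(pkt, sizeof(pkt)) < 0;
    }
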
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c5806f46f6c95..2799ff117f5ad 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -15518,7 +15518,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
+       wdev->chandef = *chandef;
+       wdev->preset_chandef = *chandef;
+ 
+-      if (wdev->iftype == NL80211_IFTYPE_STATION &&
++      if ((wdev->iftype == NL80211_IFTYPE_STATION ||
++           wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
+           !WARN_ON(!wdev->current_bss))
+               wdev->current_bss->pub.channel = chandef->chan;
+ 
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index e9aea82f370de..ab6d0c6576a6b 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3050,7 +3050,7 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+ }
+ 
+ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
+-                                                  u8 dir, u8 type, struct net *net)
++                                                  u8 dir, u8 type, struct net *net, u32 if_id)
+ {
+       struct xfrm_policy *pol, *ret = NULL;
+       struct hlist_head *chain;
+@@ -3059,7 +3059,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
+       spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+       chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
+       hlist_for_each_entry(pol, chain, bydst) {
+-              if (xfrm_migrate_selector_match(sel, &pol->selector) &&
++              if ((if_id == 0 || pol->if_id == if_id) &&
++                  xfrm_migrate_selector_match(sel, &pol->selector) &&
+                   pol->type == type) {
+                       ret = pol;
+                       priority = ret->priority;
+@@ -3071,7 +3072,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
+               if ((pol->priority >= priority) && ret)
+                       break;
+ 
+-              if (xfrm_migrate_selector_match(sel, &pol->selector) &&
++              if ((if_id == 0 || pol->if_id == if_id) &&
++                  xfrm_migrate_selector_match(sel, &pol->selector) &&
+                   pol->type == type) {
+                       ret = pol;
+                       break;
+@@ -3187,7 +3189,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
+ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                struct xfrm_migrate *m, int num_migrate,
+                struct xfrm_kmaddress *k, struct net *net,
+-               struct xfrm_encap_tmpl *encap)
++               struct xfrm_encap_tmpl *encap, u32 if_id)
+ {
+       int i, err, nx_cur = 0, nx_new = 0;
+       struct xfrm_policy *pol = NULL;
+@@ -3206,14 +3208,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+       }
+ 
+       /* Stage 1 - find policy */
+-      if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
++      if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
+               err = -ENOENT;
+               goto out;
+       }
+ 
+       /* Stage 2 - find and update state(s) */
+       for (i = 0, mp = m; i < num_migrate; i++, mp++) {
+-              if ((x = xfrm_migrate_state_find(mp, net))) {
++              if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
+                       x_cur[nx_cur] = x;
+                       nx_cur++;
+                       xc = xfrm_state_migrate(x, mp, encap);
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 44acc724122bb..cf147e1837a92 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1442,9 +1442,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
+       memcpy(&x->mark, &orig->mark, sizeof(x->mark));
+       memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
+ 
+-      if (xfrm_init_state(x) < 0)
+-              goto error;
+-
+       x->props.flags = orig->props.flags;
+       x->props.extra_flags = orig->props.extra_flags;
+ 
+@@ -1466,7 +1463,8 @@ out:
+       return NULL;
+ }
+ 
+-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
++struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
++                                              u32 if_id)
+ {
+       unsigned int h;
+       struct xfrm_state *x = NULL;
+@@ -1482,6 +1480,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
+                               continue;
+                       if (m->reqid && x->props.reqid != m->reqid)
+                               continue;
++                      if (if_id != 0 && x->if_id != if_id)
++                              continue;
+                       if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
+                                            m->old_family) ||
+                           !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
+@@ -1497,6 +1497,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
+                       if (x->props.mode != m->mode ||
+                           x->id.proto != m->proto)
+                               continue;
++                      if (if_id != 0 && x->if_id != if_id)
++                              continue;
+                       if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
+                                            m->old_family) ||
+                           !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
+@@ -1523,6 +1525,11 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+       if (!xc)
+               return NULL;
+ 
++      xc->props.family = m->new_family;
++
++      if (xfrm_init_state(xc) < 0)
++              goto error;
++
+       memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
+       memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
+ 
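
Throughout the xfrm migrate path the new if_id argument acts as a filter
with 0 as a wildcard: a state or policy is only rejected when a non-zero
if_id was requested and does not match the entry. The predicate is small
enough to state directly:

    #include <stdint.h>
    #include <stdio.h>

    /* 0 means "any interface", matching the xfrm convention. */
    static int if_id_matches(uint32_t requested, uint32_t entry)
    {
            return requested == 0 || entry == requested;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   if_id_matches(0, 7),     /* wildcard: match  */
                   if_id_matches(7, 7),     /* exact:    match  */
                   if_id_matches(7, 9));    /* mismatch: reject */
            return 0;
    }
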
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 8d8f9e778cd4f..3db5cd70b16ac 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -620,13 +620,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
+ 
+       xfrm_smark_init(attrs, &x->props.smark);
+ 
+-      if (attrs[XFRMA_IF_ID]) {
++      if (attrs[XFRMA_IF_ID])
+               x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+-              if (!x->if_id) {
+-                      err = -EINVAL;
+-                      goto error;
+-              }
+-      }
+ 
+       err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
+       if (err)
+@@ -1332,13 +1327,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+       mark = xfrm_mark_get(attrs, &m);
+ 
+-      if (attrs[XFRMA_IF_ID]) {
++      if (attrs[XFRMA_IF_ID])
+               if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+-              if (!if_id) {
+-                      err = -EINVAL;
+-                      goto out_noput;
+-              }
+-      }
+ 
+       if (p->info.seq) {
+               x = xfrm_find_acq_byseq(net, mark, p->info.seq);
+@@ -1640,13 +1630,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
+ 
+       xfrm_mark_get(attrs, &xp->mark);
+ 
+-      if (attrs[XFRMA_IF_ID]) {
++      if (attrs[XFRMA_IF_ID])
+               xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+-              if (!xp->if_id) {
+-                      err = -EINVAL;
+-                      goto error;
+-              }
+-      }
+ 
+       return xp;
+  error:
+@@ -2384,6 +2369,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
+       int n = 0;
+       struct net *net = sock_net(skb->sk);
+       struct xfrm_encap_tmpl  *encap = NULL;
++      u32 if_id = 0;
+ 
+       if (attrs[XFRMA_MIGRATE] == NULL)
+               return -EINVAL;
+@@ -2408,7 +2394,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
+                       return 0;
+       }
+ 
+-      err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap);
++      if (attrs[XFRMA_IF_ID])
++              if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
++
++      err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
+ 
+       kfree(encap);
+ 
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index b6f8349abc672..6abd1dce10ffe 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -217,7 +217,7 @@ void symbols__fixup_end(struct rb_root *symbols)
+               prev = curr;
+               curr = rb_entry(nd, struct symbol, rb_node);
+ 
+-              if (prev->end == prev->start && prev->end != curr->start)
++              if (prev->end == prev->start || prev->end != curr->start)
+                       arch__symbols__fixup_end(prev, curr);
+       }
+ 
+diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
+index 1963440f67251..b2c7043c0c30d 100644
+--- a/tools/testing/selftests/vm/userfaultfd.c
++++ b/tools/testing/selftests/vm/userfaultfd.c
+@@ -60,6 +60,7 @@
+ #include <signal.h>
+ #include <poll.h>
+ #include <string.h>
++#include <linux/mman.h>
+ #include <sys/mman.h>
+ #include <sys/syscall.h>
+ #include <sys/ioctl.h>
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index 34d08ee637471..a161951e55a54 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -429,6 +429,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+                               break;
+                       }
+                       break;
++              case ARM_SMCCC_ARCH_WORKAROUND_3:
++                      switch (kvm_arm_get_spectre_bhb_state()) {
++                      case SPECTRE_VULNERABLE:
++                              break;
++                      case SPECTRE_MITIGATED:
++                              val = SMCCC_RET_SUCCESS;
++                              break;
++                      case SPECTRE_UNAFFECTED:
++                              val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
++                              break;
++                      }
++                      break;
+               }
+               break;
+       default:
