 #define __ALT_PTR(a,f)		((void *)&(a)->f + (a)->f)
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
@@ -105,7 +117,8 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
 	return insn;
 }
 
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+static void __apply_alternatives(void *alt_region, bool use_linear_alias,
+				 unsigned long feature_mask)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
@@ -115,6 +128,9 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 		u32 insn;
 		int i, nr_inst;
 
+		if ((BIT(alt->cpufeature) & feature_mask) == 0)
+			continue;
+
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;
 
@@ -138,6 +154,21 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 }
 
 /*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void apply_alternatives_early(void)
+{
+	struct alt_region region = {
+		.begin	= (struct alt_instr *)__alt_instructions,
+		.end	= (struct alt_instr *)__alt_instructions_end,
+	};
+
+	__apply_alternatives(&region, true, EARLY_APPLY_FEATURE_MASK);
+}
+
+/*
  * We might be patching the stop_machine state machine, so implement a
  * really simple polling protocol here.
  */
@@ -156,7 +187,9 @@ static int __apply_alternatives_multi_stop(void *unused)
 		isb();
 	} else {
 		BUG_ON(patched);
-		__apply_alternatives(&region, true);
+
+		__apply_alternatives(&region, true, ~EARLY_APPLY_FEATURE_MASK);
+
 		/* Barriers provided by the cache flushing */
 		WRITE_ONCE(patched, 1);
 	}
@@ -177,5 +210,5 @@ void apply_alternatives(void *start, size_t length)
 		.end	= start + length,
 	};
 
-	__apply_alternatives(&region, false);
+	__apply_alternatives(&region, false, -1);
 }
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 551eb07..37361b5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -453,6 +453,12 @@ void __init smp_prepare_boot_cpu(void)
 	 * cpuinfo_store_boot_cpu() above.
 	 */
 	update_cpu_errata_workarounds();
+	/*
+	 * We now know enough about the boot CPU to apply the
+	 * alternatives that cannot wait until interrupt handling
+	 * and/or scheduling is enabled.
+	 */
+	apply_alternatives_early();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
--
1.9.1
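
To make the two-phase split concrete: the feature_mask plumbing added to
__apply_alternatives() amounts to one bit test per alternatives entry. The
standalone C sketch below illustrates the partitioning; the feature numbers
and the mask value are made-up stand-ins, since the kernel's real ARM64_*
cpucap constants and the definition of EARLY_APPLY_FEATURE_MASK are outside
this excerpt.

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))

/* Hypothetical feature numbers, standing in for the ARM64_* cpucaps. */
enum {
	FEAT_GIC_CPUIF = 0,
	FEAT_PAN       = 1,
	FEAT_UAO       = 2,
	NR_FEATS       = 3,
};

/* Assume only the GIC CPU interface feature needs early patching. */
#define EARLY_APPLY_FEATURE_MASK	BIT(FEAT_GIC_CPUIF)

/*
 * Mirrors the gate added to __apply_alternatives(): an entry is only
 * considered when its feature bit falls inside the requested mask.
 */
static int selected(unsigned int cpufeature, unsigned long feature_mask)
{
	return (BIT(cpufeature) & feature_mask) != 0;
}

int main(void)
{
	unsigned int feat;

	/* Early boot pass: only masked-in features are patched. */
	for (feat = 0; feat < NR_FEATS; feat++)
		printf("early: feature %u -> %s\n", feat,
		       selected(feat, EARLY_APPLY_FEATURE_MASK) ?
		       "patch" : "skip");

	/*
	 * The stop_machine() pass uses the complement, so between them
	 * the two passes cover every feature exactly once.
	 */
	for (feat = 0; feat < NR_FEATS; feat++)
		printf("late:  feature %u -> %s\n", feat,
		       selected(feat, ~EARLY_APPLY_FEATURE_MASK) ?
		       "patch" : "skip");

	return 0;
}

Because the late pass is gated on ~EARLY_APPLY_FEATURE_MASK, no entry is
patched twice, while apply_alternatives() passes -1 (all bits set) so that
regions patched through that path still see every enabled feature.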