/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

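/*
 * The matchers below run once per CPU with preemption disabled, so each
 * one sanity-checks that it was invoked with SCOPE_LOCAL_CPU and that
 * the caller cannot migrate mid-check.
 */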
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

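/*
 * Kryo parts are not covered by a single MIDR. The mask below keeps the
 * implementer and architecture fields plus only the top nibble of the
 * part number, so every Kryo part-number variant compares equal to the
 * entry's midr_model.
 */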
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_model;
}

static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

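/*
 * When a CPU's CTR_EL0 disagrees with the sanitised system-wide value,
 * userspace must not see the raw register. Clearing SCTLR_EL1.UCT makes
 * EL0 reads of CTR_EL0 trap to EL1, where the kernel emulates them with
 * the safe system-wide value instead.
 */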
static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

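/*
 * Branch predictor hardening: affected CPUs install a per-CPU callback
 * that invalidates (or starves) the branch predictor on paths where
 * user-controlled predictions could be harmful. Under KVM, a 2K slot of
 * hardened EL2 exception vectors is also set up for each distinct
 * callback.
 */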
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	/* Replicate the workaround sequence into each 128-byte vector entry. */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

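/*
 * Hyp vector slots are a finite resource, so CPUs sharing the same
 * hardening callback also share a slot: look for an existing user of
 * @fn before allocating a new one.
 */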
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __qcom_hyp_sanitize_link_stack_start	NULL
#define __qcom_hyp_sanitize_link_stack_end	NULL
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL
#define __smccc_workaround_1_hvc_start		NULL
#define __smccc_workaround_1_hvc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	/* Without KVM there are no hyp vectors to patch. */
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	/*
	 * A non-zero ID_AA64PFR0_EL1.CSV2 field means the CPU declares
	 * itself immune to this style of branch predictor attack, so no
	 * callback is needed.
	 */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

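/*
 * SMCCC v1.1 firmware advertises ARM_SMCCC_ARCH_WORKAROUND_1 via the
 * ARCH_FEATURES query; a negative return value means the workaround is
 * not implemented, in which case the probe below bails out instead of
 * installing a callback that would trap to firmware for nothing.
 */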
static int enable_smccc_arch_workaround_1(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return 0;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return 0;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return 0;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return 0;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return 0;
	}

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return 0;
}

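/*
 * Falkor's predictor can be starved by overwriting its return-address
 * stack: save x30, execute sixteen dummy "bl . + 4" calls to push
 * harmless entries, then restore the real return address.
 */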
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static int qcom_enable_link_stack_sanitization(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
				__qcom_hyp_sanitize_link_stack_start,
				__qcom_hyp_sanitize_link_stack_end);

	return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

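/*
 * Illustration: MIDR_CPU_VAR_REV(v, r) packs the variant into MIDR bits
 * [23:20] and the revision into bits [3:0], so
 *
 *	MIDR_RANGE(MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0),
 *		   MIDR_CPU_VAR_REV(1, 2))
 *
 * matches Cortex-A57 r0p0 up to and including r1p2 on the local CPU.
 */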
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM erratum 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.def_scope = SCOPE_LOCAL_CPU,
		.midr_model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
		.enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
		.enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		.enable = enable_smccc_arch_workaround_1,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		.enable = enable_smccc_arch_workaround_1,
	},
#endif
	{
	}
};

/*
 * The CPU errata workarounds are detected and applied at boot time
 * and the related information is freed soon after. If the new CPU requires
 * a workaround not detected at boot, fail this CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++) {
		if (cpus_have_cap(caps->capability)) {
			if (caps->enable)
				caps->enable((void *)caps);
		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires workaround for %s, not detected"
					" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
	}
}

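/*
 * Boot-time flow: update_cpu_errata_workarounds() marks which errata
 * capabilities apply as CPUs come up, and enable_errata_workarounds()
 * then runs the ->enable() hooks once the capability state is final.
 */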
void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}