// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

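/*
 * Worked example (see the erratum 843419 entry below): its Cortex-A53
 * record matches r0p0..r0p4 but carries MIDR_FIXED(0x4, BIT(8)).  On an
 * r0p4 part (variant 0, revision 4), a core with the erratum fixed sets
 * bit 8 of REVIDR_EL1, so the loop above rejects the match and no
 * workaround is applied.
 */
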
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want all the CPUs in the system to expose a consistent
	 * CTR_EL0, so that applications behave correctly when migrated
	 * between CPUs.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

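/*
 * Illustrative sketch (not part of this file): once
 * cpu_enable_trap_ctr_access() below has cleared SCTLR_EL1.UCT, an EL0
 * read such as
 *
 *	mrs	x0, ctr_el0
 *
 * traps to EL1, where the kernel's undef handler supplies the sanitised
 * system-wide CTR_EL0 value instead of this CPU's raw copy.
 */
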
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_RANDOMIZE_BASE
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

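/*
 * Each EL2 vector slot is 2K: 16 vectors of 0x80 bytes each.  The loop
 * above stamps the (short) hardening sequence at the head of every
 * vector in the slot, and the I-cache flush makes the new code visible
 * to instruction fetch.
 */
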
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * If we're a guest with no EL2 of our own, there are no hyp
	 * vectors to patch up: just record the callback for this CPU.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_RANDOMIZE_BASE */

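/*
 * Slot allocation above is append-only: arm64_el2_vector_last_slot
 * starts at -1 and each previously-unseen callback claims the next 2K
 * slot, so CPUs sharing a callback share a slot and the BUG_ON bounds
 * the total at BP_HARDEN_EL2_SLOTS.
 */
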
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

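/*
 * Booting with "nospectre_v2" on the kernel command line disables the
 * branch predictor hardening unconditionally; check_branch_predictor()
 * below still records whether the CPU itself is affected, it just
 * refuses to install the workaround.
 */
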
/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	switch ((int)res.a0) {
	case 1:
		/* Firmware says we're just fine */
		return 0;
	case 0:
		break;
	default:
		return -1;
	}

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(cb);
	return 1;
}

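/*
 * Discovery sketch: the SMCCC handshake above boils down to
 *
 *	res = ARCH_FEATURES(ARM_SMCCC_ARCH_WORKAROUND_1);
 *	res <  0: firmware cannot help     -> return -1
 *	res == 0: WORKAROUND_1 available   -> install callback, return 1
 *	res == 1: this CPU is not affected -> return 0
 *
 * with Falkor parts swapping in a local link-stack sanitization
 * sequence in place of the firmware call.
 */
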
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

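/*
 * Example: "ssbd=force-on" unconditionally enables the mitigation,
 * "ssbd=force-off" disables it, and "ssbd=kernel" (the default)
 * protects kernel and KVM guest-exit paths while letting userspace
 * opt in via prctl().
 */
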
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

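/*
 * arm64_update_smccc_conduit() is an alternative_cb callback: the entry
 * code carries a single placeholder instruction, roughly
 *
 *	alternative_cb	arm64_update_smccc_conduit
 *	nop				// patched to SMC/HVC #0
 *	alternative_cb_end
 *
 * and the routine rewrites that one instruction at boot to match the
 * firmware conduit, leaving the NOP alone when there is no conduit.
 */
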
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void arm64_set_ssbd_mitigation(bool state)
{
	int conduit;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
				       NULL);

	WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}

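/*
 * Note the inversion above: PSTATE.SSBS set means "speculation
 * allowed", so enabling the mitigation (state == true) clears SSBS.
 * Only CPUs without the SSBS extension fall back to the
 * ARCH_WORKAROUND_2 firmware call.
 */
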
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;
	int conduit;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}
	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				       ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	if (conduit == SMCCC_CONDUIT_NONE) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
{
	if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
		cap->matches(cap, SCOPE_LOCAL_CPU);
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

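/*
 * Expansion example:
 *
 *	ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4)
 *
 * becomes ERRATA_MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4), i.e. a
 * local-CPU erratum matched by is_affected_midr_range() against
 * variant 0, revisions r0p0..r0p4 of Cortex-A53.
 */
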
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
	if (__spectrev2_safe)
		return ARM64_BP_HARDEN_NOT_REQUIRED;

	if (!__hardenbp_enab)
		return ARM64_BP_HARDEN_UNKNOWN;

	return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

static void
cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
{
	cap->matches(cap, SCOPE_LOCAL_CPU);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_RANDOMIZE_BASE

static const struct midr_range ca57_a72[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Branch predictor hardening",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
		.cpu_enable = cpu_enable_branch_predictor_hardening,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(ca57_a72),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.cpu_enable = cpu_enable_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
	{
	}
};

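/*
 * A minimal sketch of adding a new entry, assuming a hypothetical
 * CONFIG_ARM64_ERRATUM_XXXX option and ARM64_WORKAROUND_XXXX
 * capability bit:
 *
 *	#ifdef CONFIG_ARM64_ERRATUM_XXXX
 *	{
 *		.desc = "ARM erratum XXXX",
 *		.capability = ARM64_WORKAROUND_XXXX,
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A76, 0, 0, 1),
 *	},
 *	#endif
 *
 * placed before the empty terminator entry that closes the table above.
 */
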
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return sprintf(buf, "Not affected\n");
	case ARM64_BP_HARDEN_WA_NEEDED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		return sprintf(buf,
			       "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}

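/*
 * These hooks back the sysfs vulnerability files, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 *	Mitigation: Speculative Store Bypass disabled via prctl
 *
 * (output shown for a system where ssbd_state is ARM64_SSBD_KERNEL).
 */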