// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>

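/*
 * Match the CPU against the capability's MIDR range, then screen out
 * revisions whose REVIDR bits flag the erratum as fixed in hardware.
 */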
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

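/*
 * Kryo parts are matched on implementer, architecture and the top
 * nibble of the part number only, so a single entry covers the whole
 * family regardless of variant and revision.
 */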
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want all the CPUs in the system to expose a consistent
         * CTR_EL0, so that applications behave correctly across
         * migration.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU
         *    reports IDC = 0 anyway, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        bool enable_uct_trap = false;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                enable_uct_trap = true;

        /* ... or if the system is affected by an erratum */
        if (cap->capability == ARM64_WORKAROUND_1542419)
                enable_uct_trap = true;

        if (enable_uct_trap)
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

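/*
 * Index of the last 2K slot handed out in the EL2 hardening vectors;
 * bumped atomically by install_bp_hardening_cb() below.
 */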
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
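/*
 * Copy the hardening sequence into each of the sixteen 0x80-byte
 * vector entries of a 2K slot, then make the slot visible to
 * instruction fetch.
 */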
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

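/*
 * Install @fn as this CPU's branch predictor hardening callback and
 * point the CPU at a hyp vector slot carrying the matching mitigation
 * sequence. Slots are shared: a CPU reuses the slot of any CPU that
 * already installed the same callback.
 */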
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        /*
         * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
         * we're a guest. Skip the hyp-vectors work.
         */
        if (!hyp_vecs_start) {
                __this_cpu_write(bp_hardening_data.fn, fn);
                return;
        }

        raw_spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                slot = atomic_inc_return(&arm64_el2_vector_last_slot);
                BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM_INDIRECT_VECTORS */

static void __maybe_unused call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

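/*
 * Scrub the CPU's return-address predictor: sixteen bl instructions,
 * each targeting the very next instruction, push sixteen fresh entries
 * onto the link stack before the original x30 is restored.
 */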
static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
        __nospectre_v2 = true;
        return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        switch ((int)res.a0) {
        case 1:
                /* Firmware says we're just fine */
                return 0;
        case 0:
                break;
        default:
                return -1;
        }

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                cb = call_hvc_arch_workaround_1;
                /* This is a guest, no need to patch KVM vectors */
                smccc_start = NULL;
                smccc_end = NULL;
                break;

#if IS_ENABLED(CONFIG_KVM)
        case SMCCC_CONDUIT_SMC:
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc;
                smccc_end = __smccc_workaround_1_smc +
                        __SMCCC_WORKAROUND_1_SMC_SZ;
                break;
#endif

        default:
                return -1;
        }

        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;

        if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
                install_bp_hardening_cb(cb, smccc_start, smccc_end);

        return 1;
}

DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
        const char      *str;
        int             state;
} ssbd_options[] = {
        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",     ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

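/*
 * Alternative-instruction callback: rewrite the single patched
 * instruction into an HVC or SMC to match the SMCCC conduit, leaving
 * the original instruction untouched when no conduit is set up.
 */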
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case SMCCC_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

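/*
 * Flip the SSB mitigation on or off for the local CPU, preferring the
 * architected SSBS PSTATE bit where the CPU has it and falling back
 * to the ARCH_WORKAROUND_2 firmware call otherwise.
 */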
void arm64_set_ssbd_mitigation(bool state)
{
        int conduit;

        if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
                pr_info_once("SSBD disabled by kernel configuration\n");
                return;
        }

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
                else
                        asm volatile(SET_PSTATE_SSBS(1));
                return;
        }

        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
                                       NULL);

        WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}

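/*
 * Decide whether this CPU needs the SSBD mitigation, combining the
 * command-line policy held in ssbd_state with SSBS support, the
 * capability's safe list and the firmware's ARCH_WORKAROUND_2
 * discovery result. The global __ssb_safe is cleared as soon as any
 * booted CPU turns out to be vulnerable.
 */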
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;
        bool this_cpu_safe = false;
        int conduit;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (cpu_mitigations_off())
                ssbd_state = ARM64_SSBD_FORCE_DISABLE;

        /* delay setting __ssb_safe until we get a firmware response */
        if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
                this_cpu_safe = true;

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (!this_cpu_safe)
                        __ssb_safe = false;
                required = false;
                goto out_printmsg;
        }

        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                       ARM_SMCCC_ARCH_WORKAROUND_2, &res);

        if (conduit == SMCCC_CONDUIT_NONE) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;

        /* machines with mixed mitigation requirements must not return this */
        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                __ssb_safe = false;
                required = true;
                break;

        case 1: /* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

out_printmsg:
        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                break;
        }

        return required;
}

/* Known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        {},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                                    \
        .matches = is_affected_midr_range,                              \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
        if (__spectrev2_safe)
                return ARM64_BP_HARDEN_NOT_REQUIRED;

        if (!__hardenbp_enab)
                return ARM64_BP_HARDEN_UNKNOWN;

        return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
        int need_wa;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /* If the CPU has CSV2 set, we're safe */
        if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
                                                 ID_AA64PFR0_CSV2_SHIFT))
                return false;

        /* Alternatively, we have a list of unaffected CPUs */
        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                return false;

        /* Fall back to firmware detection */
        need_wa = detect_harden_bp_fw();
        if (!need_wa)
                return false;

        __spectrev2_safe = false;

        if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
                pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
                __hardenbp_enab = false;
                return false;
        }

        /* forced off */
        if (__nospectre_v2 || cpu_mitigations_off()) {
                pr_info_once("spectrev2 mitigation disabled by command line option\n");
                __hardenbp_enab = false;
                return false;
        }

        if (need_wa < 0) {
                pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
                __hardenbp_enab = false;
        }

        return (need_wa > 0);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

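/*
 * The TVM trap is only needed when a ThunderX2 machine runs with SMT
 * enabled: a non-zero thread ID (MPIDR affinity level 0) on any CPU
 * triggers the match, provided EL2 is available for KVM.
 */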
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
                         int scope)
{
        int i;

        if (!is_affected_midr_range_list(entry, scope) ||
            !is_hyp_mode_available())
                return false;

        for_each_possible_cpu(i) {
                if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
                        return true;
        }

        return false;
}

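/*
 * Erratum 1542419 only applies to Neoverse N1 parts that advertise
 * CTR_EL0.DIC; the cpu_enable hook then traps EL0 reads of CTR_EL0 so
 * that the DIC bit can be hidden from userspace.
 */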
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        u32 midr = read_cpuid_id();
        bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_RANDOMIZE_BASE

static const struct midr_range ca57_a72[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
        },
#endif
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
        /* Cavium ThunderX, T81 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        /* Cavium ThunderX, T83 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef  CONFIG_ARM64_ERRATUM_819472
        /* Cortex-A53 r0p[01]: ARM errata 819472 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
        /* Cortex-A76 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
        /* Cortex-A53 r0p[01234] */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        /* Brahma-B53 r0p[0] */
        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
        {
                /* Cortex-A53 r0p[01234] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                MIDR_FIXED(0x4, BIT(8)),
        },
        {
                /* Brahma-B53 r0p[0] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
        /* Cortex-A76 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex-A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
        /* Kryo4xx Silver (rdpe => r1p0) */
        MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
        /* Cortex-A76 r0p0 - r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

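/*
 * Master table of erratum workarounds. It is scanned during feature
 * enumeration on every CPU that comes online, so each .matches routine
 * above is invoked with SCOPE_LOCAL_CPU per CPU.
 */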
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
                .desc = "ARM errata 826319, 827319, 824069, or 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = erratum_843419_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
        },
#endif
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
                .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = arm64_repeat_tlbi_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = check_branch_predictor,
        },
#ifdef CONFIG_RANDOMIZE_BASE
        {
                .desc = "EL2 vector hardening",
                .capability = ARM64_HARDEN_EL2_VECTORS,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
        },
#endif
        {
                .desc = "Speculative Store Bypass Disable",
                .capability = ARM64_SSBD,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_ssbd_mitigation,
                .midr_range_list = arm64_ssb_cpus,
        },
#ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
        {
                .desc = "ARM errata 1165522, 1319367, or 1530923",
                .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
                ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
                .midr_range_list = erratum_1463225,
        },
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
        {
                .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                .matches = needs_tx2_tvm_workaround,
        },
        {
                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
        {
                /* We depend on the firmware portion for correctness */
                .desc = "ARM erratum 1542419 (kernel portion)",
                .capability = ARM64_WORKAROUND_1542419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_neoverse_n1_erratum_1542419,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#endif
        {
        }
};

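/* sysfs hooks backing /sys/devices/system/cpu/vulnerabilities/ */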
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        switch (get_spectre_v2_workaround_state()) {
        case ARM64_BP_HARDEN_NOT_REQUIRED:
                return sprintf(buf, "Not affected\n");
        case ARM64_BP_HARDEN_WA_NEEDED:
                return sprintf(buf, "Mitigation: Branch predictor hardening\n");
        case ARM64_BP_HARDEN_UNKNOWN:
        default:
                return sprintf(buf, "Vulnerable\n");
        }
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        if (__ssb_safe)
                return sprintf(buf, "Not affected\n");

        switch (ssbd_state) {
        case ARM64_SSBD_KERNEL:
        case ARM64_SSBD_FORCE_ENABLE:
                if (IS_ENABLED(CONFIG_ARM64_SSBD))
                        return sprintf(buf,
                            "Mitigation: Speculative Store Bypass disabled via prctl\n");
        }

        return sprintf(buf, "Vulnerable\n");
}