arch/arm64/kernel/cpu_errata.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>

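/*
 * Check whether this CPU's MIDR falls in the erratum's affected range,
 * then filter out revisions that carry a hardware fix advertised through
 * REVIDR_EL1 bits (see MIDR_FIXED() below).
 */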
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

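/*
 * Qualcomm Kryo parts share errata across part numbers, so match on
 * implementer/architecture and only the top nibble of the part number
 * rather than on a full MIDR range.
 */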
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want all CPUs in the system to expose a consistent CTR_EL0,
         * so that applications behave correctly when migrated between
         * them.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU
         *    anyway reports IDC = 0, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        bool enable_uct_trap = false;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                enable_uct_trap = true;

        /* ... or if the system is affected by an erratum */
        if (cap->capability == ARM64_WORKAROUND_1542419)
                enable_uct_trap = true;

        if (enable_uct_trap)
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

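/*
 * Allocator for the EL2 hardened vector slots: starts at -1 so that the
 * first atomic_inc_return() hands out slot 0.
 */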
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
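/*
 * Each hardened vector slot is 2K; copy the hardening sequence in front
 * of each of the 16 vector entries (one every 0x80 bytes) in the slot,
 * then make the result visible to the instruction side.
 */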
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        /*
         * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
         * we're a guest. Skip the hyp-vectors work.
         */
        if (!hyp_vecs_start) {
                __this_cpu_write(bp_hardening_data.fn, fn);
                return;
        }

        raw_spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                slot = atomic_inc_return(&arm64_el2_vector_last_slot);
                BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM_INDIRECT_VECTORS */
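/*
 * ARM_SMCCC_ARCH_WORKAROUND_1 asks firmware to invalidate the branch
 * predictor; the conduit (SMC or HVC) depends on how the firmware is
 * reached.
 */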
static void __maybe_unused call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

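/*
 * On Falkor, rather than calling into firmware, overwrite the link-stack
 * (return address) predictor entries with 16 harmless branch-and-link
 * instructions.
 */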
static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
        __nospectre_v2 = true;
        return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround available
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        switch ((int)res.a0) {
        case 1:
                /* Firmware says we're just fine */
                return 0;
        case 0:
                break;
        default:
                return -1;
        }

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                cb = call_hvc_arch_workaround_1;
                /* This is a guest, no need to patch KVM vectors */
                smccc_start = NULL;
                smccc_end = NULL;
                break;

#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
        case SMCCC_CONDUIT_SMC:
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc;
                smccc_end = __smccc_workaround_1_smc +
                        __SMCCC_WORKAROUND_1_SMC_SZ;
                break;
#endif

        default:
                return -1;
        }

        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;

        if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
                install_bp_hardening_cb(cb, smccc_start, smccc_end);

        return 1;
}

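/*
 * Per-CPU flag telling the entry code whether an ARCH_WORKAROUND_2 call
 * is needed; ssbd_state holds the system-wide policy, and __ssb_safe is
 * cleared as soon as one booted CPU turns out to be vulnerable.
 */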
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
        const char      *str;
        int             state;
} ssbd_options[] = {
        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",     ARM64_SSBD_KERNEL, },
};

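/*
 * Parse the "ssbd=" kernel command line option: one of "force-on",
 * "force-off" or "kernel" (the default, which flips the mitigation
 * dynamically around kernel entry/exit).
 */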
static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

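/*
 * Alternative callback: patch the single conduit instruction in the
 * entry-code ARCH_WORKAROUND_2 sequence into an HVC or SMC, or leave the
 * original instruction alone if there is no conduit.
 */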
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case SMCCC_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

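/*
 * Flip the mitigation on or off for this CPU: prefer the architected
 * PSTATE.SSBS bit when present (SSBS set means "speculation allowed",
 * hence the inversion), and fall back to an ARCH_WORKAROUND_2 firmware
 * call otherwise.
 */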
void arm64_set_ssbd_mitigation(bool state)
{
        int conduit;

        if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
                pr_info_once("SSBD disabled by kernel configuration\n");
                return;
        }

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
                else
                        asm volatile(SET_PSTATE_SSBS(1));
                return;
        }

        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
                                       NULL);

        WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}

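/*
 * Probe this CPU's SSB status: honour command-line overrides, then the
 * SSBS capability, then query firmware for ARCH_WORKAROUND_2 support,
 * recording both whether a mitigation is required here and whether the
 * machine as a whole can still be considered safe.
 */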
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;
        bool this_cpu_safe = false;
        int conduit;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (cpu_mitigations_off())
                ssbd_state = ARM64_SSBD_FORCE_DISABLE;

        /* delay setting __ssb_safe until we get a firmware response */
        if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
                this_cpu_safe = true;

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (!this_cpu_safe)
                        __ssb_safe = false;
                required = false;
                goto out_printmsg;
        }

        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                       ARM_SMCCC_ARCH_WORKAROUND_2, &res);

        if (conduit == SMCCC_CONDUIT_NONE) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;

        /* machines with mixed mitigation requirements must not return this */
        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                __ssb_safe = false;
                required = true;
                break;

        case 1: /* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

out_printmsg:
        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                break;
        }

        return required;
}

/* Known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        {},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
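/*
 * Set around the syscall-entry window affected by erratum 1463225; the
 * debug exception handler checks it so that a spurious debug exception
 * taken in that window can be ignored.
 */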
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        u32 midr = read_cpuid_id();
        /* Cortex-A76 r0p0 - r3p1 */
        struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

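/*
 * Helpers for building the .matches/.midr_range fields of a capability
 * from a CPU model and variant/revision range; the ERRATA_* variants
 * also set the capability type to a local-CPU erratum.
 */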
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                                    \
        .matches = is_affected_midr_range,                              \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
        if (__spectrev2_safe)
                return ARM64_BP_HARDEN_NOT_REQUIRED;

        if (!__hardenbp_enab)
                return ARM64_BP_HARDEN_UNKNOWN;

        return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
        int need_wa;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /* If the CPU has CSV2 set, we're safe */
        if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
                                                 ID_AA64PFR0_CSV2_SHIFT))
                return false;

        /* Alternatively, we have a list of unaffected CPUs */
        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                return false;

        /* Fall back to firmware detection */
        need_wa = detect_harden_bp_fw();
        if (!need_wa)
                return false;

        __spectrev2_safe = false;

        if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
                pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
                __hardenbp_enab = false;
                return false;
        }

        /* forced off */
        if (__nospectre_v2 || cpu_mitigations_off()) {
                pr_info_once("spectrev2 mitigation disabled by command line option\n");
                __hardenbp_enab = false;
                return false;
        }

        if (need_wa < 0) {
                pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
                __hardenbp_enab = false;
        }

        return (need_wa > 0);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

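/*
 * The TVM trap workaround is only needed when running at EL2 on an SMT
 * part: a non-zero affinity level 0 on any CPU means the firmware has
 * exposed the threads, i.e. SMT is enabled.
 */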
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
                         int scope)
{
        int i;

        if (!is_affected_midr_range_list(entry, scope) ||
            !is_hyp_mode_available())
                return false;

        for_each_possible_cpu(i) {
                if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
                        return true;
        }

        return false;
}

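/*
 * Erratum 1542419 is only relevant when the core advertises DIC
 * (instruction fetch coherent with data); the workaround hides DIC from
 * EL0 via CTR_EL0 trapping so that userspace performs the maintenance
 * that avoids the erratum.
 */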
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        u32 midr = read_cpuid_id();
        bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && has_dic;
}

#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)

static const struct midr_range ca57_a72[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
        },
#endif
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
        /* Cavium ThunderX, T81 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        /* Cavium ThunderX, T83 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef  CONFIG_ARM64_ERRATUM_819472
        /* Cortex-A53 r0p[01]: ARM erratum 819472 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
        /* Cortex-A76 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
        /* Cortex-A53 r0p[01234] */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        /* Brahma-B53 r0p[0] */
        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
        {
                /* Cortex-A53 r0p[01234] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                MIDR_FIXED(0x4, BIT(8)),
        },
        {
                /* Brahma-B53 r0p[0] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
static const struct midr_range erratum_speculative_at_vhe_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
        /* Cortex-A76 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex-A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
#endif
        {},
};
#endif

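/*
 * Master list of erratum workarounds; each entry is matched against
 * every CPU as it comes up, and .cpu_enable is run on the CPUs that
 * need the workaround.
 */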
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
                .desc = "ARM errata 826319, 827319, 824069, 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = erratum_843419_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
        },
#endif
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
                .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = arm64_repeat_tlbi_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = check_branch_predictor,
        },
#ifdef CONFIG_HARDEN_EL2_VECTORS
        {
                .desc = "EL2 vector hardening",
                .capability = ARM64_HARDEN_EL2_VECTORS,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
        },
#endif
        {
                .desc = "Speculative Store Bypass Disable",
                .capability = ARM64_SSBD,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_ssbd_mitigation,
                .midr_range_list = arm64_ssb_cpus,
        },
#ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
        {
                .desc = "ARM errata 1165522, 1530923",
                .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
                ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
        },
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
        {
                .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                .matches = needs_tx2_tvm_workaround,
        },
        {
                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1542419 (kernel portion)",
                .capability = ARM64_WORKAROUND_1542419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_neoverse_n1_erratum_1542419,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
        {
                .desc = "ARM erratum 1319367",
                .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
        },
#endif
        {
        }
};

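/*
 * sysfs vulnerability reporting, backing
 * /sys/devices/system/cpu/vulnerabilities/*.
 */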
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        switch (get_spectre_v2_workaround_state()) {
        case ARM64_BP_HARDEN_NOT_REQUIRED:
                return sprintf(buf, "Not affected\n");
        case ARM64_BP_HARDEN_WA_NEEDED:
                return sprintf(buf, "Mitigation: Branch predictor hardening\n");
        case ARM64_BP_HARDEN_UNKNOWN:
        default:
                return sprintf(buf, "Vulnerable\n");
        }
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        if (__ssb_safe)
                return sprintf(buf, "Not affected\n");

        switch (ssbd_state) {
        case ARM64_SSBD_KERNEL:
        case ARM64_SSBD_FORCE_ENABLE:
                if (IS_ENABLED(CONFIG_ARM64_SSBD))
                        return sprintf(buf,
                            "Mitigation: Speculative Store Bypass disabled via prctl\n");
        }

        return sprintf(buf, "Vulnerable\n");
}