arch/arm64/kernel/cpu_errata.c (linux-2.6-microblaze.git, commit 2797bc2c8c6a88ec4687eefbdfaf558d8fa761e9)
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

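/*
 * Check whether the current CPU's MIDR falls within the entry's affected
 * range and is not one of the fixed revisions listed in entry->fixed_revs
 * (matched on variant/revision with a REVIDR_EL1 bit flagging the fix).
 */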
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

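/* Check whether the current CPU's MIDR matches any range in the entry's list. */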
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

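/*
 * Match Qualcomm Kryo parts by comparing only the implementer, architecture
 * and upper part-number bits, ignoring the variant/revision fields.
 */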
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

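/*
 * Flag a mismatch when this CPU's CTR_EL0 differs from the system-wide
 * safe value in any strictly-checked field.
 */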
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

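/*
 * Copy the workaround sequence into each 0x80-byte vector entry of the
 * chosen 2K hyp vector slot, then flush the I-cache for the slot.
 */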
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

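/*
 * Install the hardening callback for this CPU, reusing the hyp vector slot
 * of any CPU that already uses the same callback, or allocating and
 * populating a new slot otherwise.
 */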
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

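/*
 * Install the callback only if this CPU matches the erratum entry and does
 * not advertise CSV2 in ID_AA64PFR0_EL1.
 */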
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

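/*
 * Scrub the branch predictor's link stack with 16 dummy bl instructions,
 * preserving x30 across the sequence.
 */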
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

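/*
 * Probe the firmware for ARM_SMCCC_ARCH_WORKAROUND_1 over the SMCCC 1.1
 * conduit (HVC or SMC) and install the matching hardening callback.
 * Falkor parts use the local link-stack sanitization sequence instead.
 */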
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

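/*
 * Parse the "ssbd=" kernel parameter and select the requested mitigation
 * policy: force-on, force-off or dynamic ("kernel") control.
 */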
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

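/*
 * Alternative callback: rewrite the single patched instruction as an HVC
 * or SMC, matching the SMCCC conduit reported by firmware.
 */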
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

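/* Ask firmware to enable or disable ARCH_WORKAROUND_2 on the calling CPU. */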
static void arm64_set_ssbd_mitigation(bool state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

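/*
 * Query firmware for ARCH_WORKAROUND_2 support, record the global SSBD
 * state (unknown, mitigated or kernel-controlled) and apply any
 * command-line override. Returns true if this CPU needs the mitigation.
 */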
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)

/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	{},
};

#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

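/*
 * Table of CPU erratum workarounds. Each entry is matched per CPU via a
 * MIDR range or a custom matches() hook; cpu_enable(), where present,
 * applies the workaround on the affected CPU.
 */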
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				      0, 0,
				      1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
	{
	}
};