arch/arm64/kernel/cpufeature.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Contains CPU feature definitions
4  *
5  * Copyright (C) 2015 ARM Ltd.
6  */
7
8 #define pr_fmt(fmt) "CPU features: " fmt
9
10 #include <linux/bsearch.h>
11 #include <linux/cpumask.h>
12 #include <linux/crash_dump.h>
13 #include <linux/sort.h>
14 #include <linux/stop_machine.h>
15 #include <linux/types.h>
16 #include <linux/mm.h>
17 #include <linux/cpu.h>
18 #include <asm/cpu.h>
19 #include <asm/cpufeature.h>
20 #include <asm/cpu_ops.h>
21 #include <asm/fpsimd.h>
22 #include <asm/mmu_context.h>
23 #include <asm/processor.h>
24 #include <asm/sysreg.h>
25 #include <asm/traps.h>
26 #include <asm/virt.h>
27
28 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
29 static unsigned long elf_hwcap __read_mostly;
30
31 #ifdef CONFIG_COMPAT
32 #define COMPAT_ELF_HWCAP_DEFAULT        \
33                                 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
34                                  COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
35                                  COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
36                                  COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
37                                  COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
38                                  COMPAT_HWCAP_LPAE)
39 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
40 unsigned int compat_elf_hwcap2 __read_mostly;
41 #endif
42
43 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
44 EXPORT_SYMBOL(cpu_hwcaps);
45 static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
46
47 /* We also need a bit for ARM64_CB_PATCH */
48 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
49
50 /*
51  * Flag to indicate if we have computed the system wide
52  * capabilities based on the boot time active CPUs. This
53  * will be used to determine if a new booting CPU should
54  * go through the verification process to make sure that it
55  * supports the system capabilities, without using a hotplug
56  * notifier.
57  */
58 static bool sys_caps_initialised;
59
60 static inline void set_sys_caps_initialised(void)
61 {
62         sys_caps_initialised = true;
63 }
64
65 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
66 {
67         /* file-wide pr_fmt adds "CPU features: " prefix */
68         pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
69         return 0;
70 }
71
72 static struct notifier_block cpu_hwcaps_notifier = {
73         .notifier_call = dump_cpu_hwcaps
74 };
75
76 static int __init register_cpu_hwcaps_dumper(void)
77 {
78         atomic_notifier_chain_register(&panic_notifier_list,
79                                        &cpu_hwcaps_notifier);
80         return 0;
81 }
82 __initcall(register_cpu_hwcaps_dumper);
83
84 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
85 EXPORT_SYMBOL(cpu_hwcap_keys);
86
87 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
88         {                                               \
89                 .sign = SIGNED,                         \
90                 .visible = VISIBLE,                     \
91                 .strict = STRICT,                       \
92                 .type = TYPE,                           \
93                 .shift = SHIFT,                         \
94                 .width = WIDTH,                         \
95                 .safe_val = SAFE_VAL,                   \
96         }
97
98 /* Define a feature with unsigned values */
99 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
100         __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
101
102 /* Define a feature with a signed value */
103 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
104         __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
105
106 #define ARM64_FTR_END                                   \
107         {                                               \
108                 .width = 0,                             \
109         }
110
111 /* meta feature for alternatives */
112 static bool __maybe_unused
113 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
114
115 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
116
117 /*
118  * NOTE: Any changes to the visibility of features should be kept in
119  * sync with the documentation of the CPU feature register ABI.
120  */
121 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
122         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
123         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
124         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
125         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
126         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
127         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
128         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
129         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
130         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
131         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
132         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
133         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
134         ARM64_FTR_END,
135 };
136
137 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
138         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
139         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
140         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
141                        FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
142         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
143                        FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
144         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
145         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
146         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
147         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
148                        FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
149         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
150                        FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
151         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
152         ARM64_FTR_END,
153 };
154
155 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
156         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
157         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
158         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
159         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
160                                    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
161         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
162         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
163         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
164         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
165         /* Linux doesn't care about EL3 */
166         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
167         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
168         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
169         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
170         ARM64_FTR_END,
171 };
172
173 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
174         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
175         ARM64_FTR_END,
176 };
177
178 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
179         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
180         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
181         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
182         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
183         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
184         ARM64_FTR_END,
185 };
186
187 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
188         /*
189          * We already refuse to boot CPUs that don't support our configured
190          * page size, so we can only detect mismatches for a page size other
191          * than the one we're currently using. Unfortunately, SoCs like this
192          * exist in the wild so, even though we don't like it, we'll have to go
193          * along with it and treat them as non-strict.
194          */
195         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
196         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
197         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
198
199         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
200         /* Linux shouldn't care about secure memory */
201         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
202         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
203         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
204         /*
205          * Differing PARange is fine as long as all peripherals and memory are mapped
206          * within the minimum PARange of all CPUs
207          */
208         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
209         ARM64_FTR_END,
210 };
211
212 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
213         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
214         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
215         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
216         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
217         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
218         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
219         ARM64_FTR_END,
220 };
221
222 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
223         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
224         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
225         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
226         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
227         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
228         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
229         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
230         ARM64_FTR_END,
231 };
232
233 static const struct arm64_ftr_bits ftr_ctr[] = {
234         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
235         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
236         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
237         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
238         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
239         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
240         /*
241          * Linux can handle differing I-cache policies. Userspace JITs will
242          * make use of *minLine.
243          * If we have differing I-cache policies, report it as the weakest - VIPT.
244          */
245         ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),       /* L1Ip */
246         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
247         ARM64_FTR_END,
248 };
249
250 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
251         .name           = "SYS_CTR_EL0",
252         .ftr_bits       = ftr_ctr
253 };
254
255 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
256         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),   /* InnerShr */
257         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),       /* FCSE */
258         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),    /* AuxReg */
259         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),       /* TCM */
260         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),       /* ShareLvl */
261         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),    /* OuterShr */
262         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* PMSA */
263         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),        /* VMSA */
264         ARM64_FTR_END,
265 };
266
267 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
268         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
269         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
270         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
271         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
272         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
273         /*
274          * We can instantiate multiple PMU instances with different levels
275          * of support.
276          */
277         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
278         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
279         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
280         ARM64_FTR_END,
281 };
282
283 static const struct arm64_ftr_bits ftr_mvfr2[] = {
284         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* FPMisc */
285         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* SIMDMisc */
286         ARM64_FTR_END,
287 };
288
289 static const struct arm64_ftr_bits ftr_dczid[] = {
290         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),            /* DZP */
291         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),       /* BS */
292         ARM64_FTR_END,
293 };
294
295
296 static const struct arm64_ftr_bits ftr_id_isar5[] = {
297         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
298         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
299         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
300         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
301         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
302         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
303         ARM64_FTR_END,
304 };
305
306 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
307         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* ac2 */
308         ARM64_FTR_END,
309 };
310
311 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
312         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
313         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
314         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* State1 */
315         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* State0 */
316         ARM64_FTR_END,
317 };
318
319 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
320         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
321         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),   /* PerfMon */
322         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
323         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
324         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
325         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
326         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
327         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
328         ARM64_FTR_END,
329 };
330
331 static const struct arm64_ftr_bits ftr_zcr[] = {
332         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
333                 ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),        /* LEN */
334         ARM64_FTR_END,
335 };
336
337 /*
338  * Common ftr bits for a 32bit register with all hidden, strict
339  * attributes, with 4bit feature fields and a default safe value of
340  * 0. Covers the following 32bit registers:
341  * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
342  */
343 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
344         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
345         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
346         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
347         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
348         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
349         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
350         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
351         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
352         ARM64_FTR_END,
353 };
354
355 /* Table for a single 32bit feature value */
356 static const struct arm64_ftr_bits ftr_single32[] = {
357         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
358         ARM64_FTR_END,
359 };
360
361 static const struct arm64_ftr_bits ftr_raz[] = {
362         ARM64_FTR_END,
363 };
364
365 #define ARM64_FTR_REG(id, table) {              \
366         .sys_id = id,                           \
367         .reg =  &(struct arm64_ftr_reg){        \
368                 .name = #id,                    \
369                 .ftr_bits = &((table)[0]),      \
370         }}
371
372 static const struct __ftr_reg_entry {
373         u32                     sys_id;
374         struct arm64_ftr_reg    *reg;
375 } arm64_ftr_regs[] = {
376
377         /* Op1 = 0, CRn = 0, CRm = 1 */
378         ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
379         ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
380         ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
381         ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
382         ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
383         ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
384         ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
385
386         /* Op1 = 0, CRn = 0, CRm = 2 */
387         ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
388         ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
389         ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
390         ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
391         ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
392         ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
393         ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
394
395         /* Op1 = 0, CRn = 0, CRm = 3 */
396         ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
397         ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
398         ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
399
400         /* Op1 = 0, CRn = 0, CRm = 4 */
401         ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
402         ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
403         ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
404
405         /* Op1 = 0, CRn = 0, CRm = 5 */
406         ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
407         ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
408
409         /* Op1 = 0, CRn = 0, CRm = 6 */
410         ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
411         ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
412
413         /* Op1 = 0, CRn = 0, CRm = 7 */
414         ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
415         ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
416         ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
417
418         /* Op1 = 0, CRn = 1, CRm = 2 */
419         ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
420
421         /* Op1 = 3, CRn = 0, CRm = 0 */
422         { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
423         ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
424
425         /* Op1 = 3, CRn = 14, CRm = 0 */
426         ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
427 };
428
429 static int search_cmp_ftr_reg(const void *id, const void *regp)
430 {
431         return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
432 }
433
434 /*
435  * get_arm64_ftr_reg - Lookup a feature register entry using its
436  * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
437  * ascending order of sys_id, we use binary search to find a matching
438  * entry.
439  *
440  * returns - Upon success, matching ftr_reg entry for id.
441  *         - NULL on failure. It is up to the caller to decide
442  *           the impact of a failure.
443  */
444 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
445 {
446         const struct __ftr_reg_entry *ret;
447
448         ret = bsearch((const void *)(unsigned long)sys_id,
449                         arm64_ftr_regs,
450                         ARRAY_SIZE(arm64_ftr_regs),
451                         sizeof(arm64_ftr_regs[0]),
452                         search_cmp_ftr_reg);
453         if (ret)
454                 return ret->reg;
455         return NULL;
456 }
457
458 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
459                                s64 ftr_val)
460 {
461         u64 mask = arm64_ftr_mask(ftrp);
462
463         reg &= ~mask;
464         reg |= (ftr_val << ftrp->shift) & mask;
465         return reg;
466 }
467
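/*
 * Combine a new value for a feature field with the current system-wide
 * value and return the "safe" result: FTR_EXACT fields fall back to
 * safe_val on any mismatch, FTR_LOWER_SAFE takes the lower of the two
 * values, and FTR_HIGHER_OR_ZERO_SAFE treats zero as safe before falling
 * through to the FTR_HIGHER_SAFE rule of taking the higher value.
 */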
468 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
469                                 s64 cur)
470 {
471         s64 ret = 0;
472
473         switch (ftrp->type) {
474         case FTR_EXACT:
475                 ret = ftrp->safe_val;
476                 break;
477         case FTR_LOWER_SAFE:
478                 ret = new < cur ? new : cur;
479                 break;
480         case FTR_HIGHER_OR_ZERO_SAFE:
481                 if (!cur || !new)
482                         break;
483                 /* Fallthrough */
484         case FTR_HIGHER_SAFE:
485                 ret = new > cur ? new : cur;
486                 break;
487         default:
488                 BUG();
489         }
490
491         return ret;
492 }
493
494 static void __init sort_ftr_regs(void)
495 {
496         int i;
497
498         /* Check that the array is sorted so that we can do the binary search */
499         for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
500                 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
501 }
502
503 /*
504  * Initialise the CPU feature register from Boot CPU values.
505  * Also initialises the strict_mask for the register.
506  * Any bits that are not covered by an arm64_ftr_bits entry are considered
507  * RES0 for the system-wide value, and must strictly match.
508  */
509 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
510 {
511         u64 val = 0;
512         u64 strict_mask = ~0x0ULL;
513         u64 user_mask = 0;
514         u64 valid_mask = 0;
515
516         const struct arm64_ftr_bits *ftrp;
517         struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
518
519         BUG_ON(!reg);
520
521         for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
522                 u64 ftr_mask = arm64_ftr_mask(ftrp);
523                 s64 ftr_new = arm64_ftr_value(ftrp, new);
524
525                 val = arm64_ftr_set_value(ftrp, val, ftr_new);
526
527                 valid_mask |= ftr_mask;
528                 if (!ftrp->strict)
529                         strict_mask &= ~ftr_mask;
530                 if (ftrp->visible)
531                         user_mask |= ftr_mask;
532                 else
533                         reg->user_val = arm64_ftr_set_value(ftrp,
534                                                             reg->user_val,
535                                                             ftrp->safe_val);
536         }
537
538         val &= valid_mask;
539
540         reg->sys_val = val;
541         reg->strict_mask = strict_mask;
542         reg->user_mask = user_mask;
543 }
544
545 extern const struct arm64_cpu_capabilities arm64_errata[];
546 static const struct arm64_cpu_capabilities arm64_features[];
547
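/*
 * Build the cpu_hwcaps_ptrs[] indirection so that a capability can be
 * looked up directly by its number, warning on (and skipping)
 * out-of-range or duplicate capability entries.
 */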
548 static void __init
549 init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
550 {
551         for (; caps->matches; caps++) {
552                 if (WARN(caps->capability >= ARM64_NCAPS,
553                         "Invalid capability %d\n", caps->capability))
554                         continue;
555                 if (WARN(cpu_hwcaps_ptrs[caps->capability],
556                         "Duplicate entry for capability %d\n",
557                         caps->capability))
558                         continue;
559                 cpu_hwcaps_ptrs[caps->capability] = caps;
560         }
561 }
562
563 static void __init init_cpu_hwcaps_indirect_list(void)
564 {
565         init_cpu_hwcaps_indirect_list_from_array(arm64_features);
566         init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
567 }
568
569 static void __init setup_boot_cpu_capabilities(void);
570
571 void __init init_cpu_features(struct cpuinfo_arm64 *info)
572 {
573         /* Before we start using the table, make sure it is sorted */
574         sort_ftr_regs();
575
576         init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
577         init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
578         init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
579         init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
580         init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
581         init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
582         init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
583         init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
584         init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
585         init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
586         init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
587         init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
588         init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
589
590         if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
591                 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
592                 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
593                 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
594                 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
595                 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
596                 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
597                 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
598                 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
599                 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
600                 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
601                 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
602                 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
603                 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
604                 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
605                 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
606                 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
607         }
608
609         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
610                 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
611                 sve_init_vq_map();
612         }
613
614         /*
615          * Initialize the indirect array of CPU hwcaps capabilities pointers
616          * before we handle the boot CPU below.
617          */
618         init_cpu_hwcaps_indirect_list();
619
620         /*
621          * Detect and enable early CPU capabilities based on the boot CPU,
622          * after we have initialised the CPU feature infrastructure.
623          */
624         setup_boot_cpu_capabilities();
625 }
626
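/*
 * Fold a newly onlined CPU's register value into the system-wide value,
 * field by field, using arm64_ftr_safe_value() to resolve differences.
 */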
627 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
628 {
629         const struct arm64_ftr_bits *ftrp;
630
631         for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
632                 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
633                 s64 ftr_new = arm64_ftr_value(ftrp, new);
634
635                 if (ftr_cur == ftr_new)
636                         continue;
637                 /* Find a safe value */
638                 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
639                 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
640         }
641
642 }
643
644 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
645 {
646         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
647
648         BUG_ON(!regp);
649         update_cpu_ftr_reg(regp, val);
650         if ((boot & regp->strict_mask) == (val & regp->strict_mask))
651                 return 0;
652         pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
653                         regp->name, boot, cpu, val);
654         return 1;
655 }
656
657 /*
658  * Update system wide CPU feature registers with the values from a
659  * non-boot CPU. Also performs SANITY checks to make sure that there
660  * aren't any insane variations from that of the boot CPU.
661  */
662 void update_cpu_features(int cpu,
663                          struct cpuinfo_arm64 *info,
664                          struct cpuinfo_arm64 *boot)
665 {
666         int taint = 0;
667
668         /*
669          * The kernel can handle differing I-cache policies, but otherwise
670          * caches should look identical. Userspace JITs will make use of
671          * *minLine.
672          */
673         taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
674                                       info->reg_ctr, boot->reg_ctr);
675
676         /*
677          * Userspace may perform DC ZVA instructions. Mismatched block sizes
678          * could result in too much or too little memory being zeroed if a
679          * process is preempted and migrated between CPUs.
680          */
681         taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
682                                       info->reg_dczid, boot->reg_dczid);
683
684         /* If different, timekeeping will be broken (especially with KVM) */
685         taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
686                                       info->reg_cntfrq, boot->reg_cntfrq);
687
688         /*
689          * The kernel uses self-hosted debug features and expects CPUs to
690          * support identical debug features. We presently need CTX_CMPs, WRPs,
691          * and BRPs to be identical.
692          * ID_AA64DFR1 is currently RES0.
693          */
694         taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
695                                       info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
696         taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
697                                       info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
698         /*
699          * Even in big.LITTLE, processors should be identical instruction-set
700          * wise.
701          */
702         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
703                                       info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
704         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
705                                       info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
706
707         /*
708          * Differing PARange support is fine as long as all peripherals and
709          * memory are mapped within the minimum PARange of all CPUs.
710          * Linux should not care about secure memory.
711          */
712         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
713                                       info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
714         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
715                                       info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
716         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
717                                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
718
719         /*
720          * EL3 is not our concern.
721          */
722         taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
723                                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
724         taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
725                                       info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
726
727         taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
728                                       info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
729
730         /*
731          * If we have AArch32, we care about 32-bit features for compat.
732          * If the system doesn't support AArch32, don't update them.
733          */
734         if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
735                 id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
736
737                 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
738                                         info->reg_id_dfr0, boot->reg_id_dfr0);
739                 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
740                                         info->reg_id_isar0, boot->reg_id_isar0);
741                 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
742                                         info->reg_id_isar1, boot->reg_id_isar1);
743                 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
744                                         info->reg_id_isar2, boot->reg_id_isar2);
745                 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
746                                         info->reg_id_isar3, boot->reg_id_isar3);
747                 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
748                                         info->reg_id_isar4, boot->reg_id_isar4);
749                 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
750                                         info->reg_id_isar5, boot->reg_id_isar5);
751
752                 /*
753                  * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
754                  * ACTLR formats could differ across CPUs and therefore would have to
755                  * be trapped for virtualization anyway.
756                  */
757                 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
758                                         info->reg_id_mmfr0, boot->reg_id_mmfr0);
759                 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
760                                         info->reg_id_mmfr1, boot->reg_id_mmfr1);
761                 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
762                                         info->reg_id_mmfr2, boot->reg_id_mmfr2);
763                 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
764                                         info->reg_id_mmfr3, boot->reg_id_mmfr3);
765                 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
766                                         info->reg_id_pfr0, boot->reg_id_pfr0);
767                 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
768                                         info->reg_id_pfr1, boot->reg_id_pfr1);
769                 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
770                                         info->reg_mvfr0, boot->reg_mvfr0);
771                 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
772                                         info->reg_mvfr1, boot->reg_mvfr1);
773                 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
774                                         info->reg_mvfr2, boot->reg_mvfr2);
775         }
776
777         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
778                 taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
779                                         info->reg_zcr, boot->reg_zcr);
780
781                 /* Probe vector lengths, unless we already gave up on SVE */
782                 if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
783                     !sys_caps_initialised)
784                         sve_update_vq_map();
785         }
786
787         /*
788          * Mismatched CPU features are a recipe for disaster. Don't even
789          * pretend to support them.
790          */
791         if (taint) {
792                 pr_warn_once("Unsupported CPU feature variation detected.\n");
793                 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
794         }
795 }
796
797 u64 read_sanitised_ftr_reg(u32 id)
798 {
799         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
800
801         /* We shouldn't get a request for an unsupported register */
802         BUG_ON(!regp);
803         return regp->sys_val;
804 }
805
806 #define read_sysreg_case(r)     \
807         case r:         return read_sysreg_s(r)
808
809 /*
810  * __read_sysreg_by_encoding() - Used by a STARTING CPU before cpuinfo is populated.
811  * Read the system register on the current CPU.
812  */
813 static u64 __read_sysreg_by_encoding(u32 sys_id)
814 {
815         switch (sys_id) {
816         read_sysreg_case(SYS_ID_PFR0_EL1);
817         read_sysreg_case(SYS_ID_PFR1_EL1);
818         read_sysreg_case(SYS_ID_DFR0_EL1);
819         read_sysreg_case(SYS_ID_MMFR0_EL1);
820         read_sysreg_case(SYS_ID_MMFR1_EL1);
821         read_sysreg_case(SYS_ID_MMFR2_EL1);
822         read_sysreg_case(SYS_ID_MMFR3_EL1);
823         read_sysreg_case(SYS_ID_ISAR0_EL1);
824         read_sysreg_case(SYS_ID_ISAR1_EL1);
825         read_sysreg_case(SYS_ID_ISAR2_EL1);
826         read_sysreg_case(SYS_ID_ISAR3_EL1);
827         read_sysreg_case(SYS_ID_ISAR4_EL1);
828         read_sysreg_case(SYS_ID_ISAR5_EL1);
829         read_sysreg_case(SYS_MVFR0_EL1);
830         read_sysreg_case(SYS_MVFR1_EL1);
831         read_sysreg_case(SYS_MVFR2_EL1);
832
833         read_sysreg_case(SYS_ID_AA64PFR0_EL1);
834         read_sysreg_case(SYS_ID_AA64PFR1_EL1);
835         read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
836         read_sysreg_case(SYS_ID_AA64DFR0_EL1);
837         read_sysreg_case(SYS_ID_AA64DFR1_EL1);
838         read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
839         read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
840         read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
841         read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
842         read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
843
844         read_sysreg_case(SYS_CNTFRQ_EL0);
845         read_sysreg_case(SYS_CTR_EL0);
846         read_sysreg_case(SYS_DCZID_EL0);
847
848         default:
849                 BUG();
850                 return 0;
851         }
852 }
853
854 #include <linux/irqchip/arm-gic-v3.h>
855
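/*
 * An ID-register-based capability matches when the relevant field,
 * extracted with the sign given in the capability entry, is at least
 * min_field_value. SCOPE_SYSTEM checks the sanitised system-wide value;
 * SCOPE_LOCAL_CPU reads the register on the current CPU.
 */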
856 static bool
857 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
858 {
859         int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
860
861         return val >= entry->min_field_value;
862 }
863
864 static bool
865 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
866 {
867         u64 val;
868
869         WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
870         if (scope == SCOPE_SYSTEM)
871                 val = read_sanitised_ftr_reg(entry->sys_reg);
872         else
873                 val = __read_sysreg_by_encoding(entry->sys_reg);
874
875         return feature_matches(val, entry);
876 }
877
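/*
 * The GICv3 CPU interface is only useable if the system-register
 * interface can actually be enabled: ICC_SRE_EL1.SRE may be forced to
 * zero by firmware at a higher exception level, in which case the
 * feature is reported but cannot be used.
 */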
878 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
879 {
880         bool has_sre;
881
882         if (!has_cpuid_feature(entry, scope))
883                 return false;
884
885         has_sre = gic_enable_sre();
886         if (!has_sre)
887                 pr_warn_once("%s present but disabled by higher exception level\n",
888                              entry->desc);
889
890         return has_sre;
891 }
892
893 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
894 {
895         u32 midr = read_cpuid_id();
896
897         /* Cavium ThunderX pass 1.x and 2.x */
898         return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
899                 MIDR_CPU_VAR_REV(0, 0),
900                 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
901 }
902
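/*
 * FP/ASIMD is treated as absent when the sanitised ID_AA64PFR0_EL1.FP
 * field is negative (0xf), i.e. floating-point is not implemented.
 */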
903 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
904 {
905         u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
906
907         return cpuid_feature_extract_signed_field(pfr0,
908                                         ID_AA64PFR0_FP_SHIFT) < 0;
909 }
910
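/*
 * CTR_EL0.IDC set means a D-cache clean to the Point of Unification is
 * not required for instruction-to-data coherence; the DIC bit (checked
 * in has_cache_dic() below) covers I-cache invalidation to the PoU.
 */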
911 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
912                           int scope)
913 {
914         u64 ctr;
915
916         if (scope == SCOPE_SYSTEM)
917                 ctr = arm64_ftr_reg_ctrel0.sys_val;
918         else
919                 ctr = read_cpuid_effective_cachetype();
920
921         return ctr & BIT(CTR_IDC_SHIFT);
922 }
923
924 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
925 {
926         /*
927          * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
928          * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
929          * to the CTR_EL0 on this CPU and emulate it with the real/safe
930          * value.
931          */
932         if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
933                 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
934 }
935
936 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
937                           int scope)
938 {
939         u64 ctr;
940
941         if (scope == SCOPE_SYSTEM)
942                 ctr = arm64_ftr_reg_ctrel0.sys_val;
943         else
944                 ctr = read_cpuid_cachetype();
945
946         return ctr & BIT(CTR_DIC_SHIFT);
947 }
948
949 static bool __maybe_unused
950 has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
951 {
952         /*
953          * Kdump isn't guaranteed to power off all secondary CPUs, so CNP
954          * may share TLB entries with a CPU stuck in the crashed
955          * kernel.
956          */
957          if (is_kdump_kernel())
958                 return false;
959
960         return has_cpuid_feature(entry, scope);
961 }
962
963 static bool __meltdown_safe = true;
964 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
965
966 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
967                                 int scope)
968 {
969         /* List of CPUs that are not vulnerable and don't need KPTI */
970         static const struct midr_range kpti_safe_list[] = {
971                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
972                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
973                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
974                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
975                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
976                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
977                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
978                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
979                 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
980                 { /* sentinel */ }
981         };
982         char const *str = "kpti command line option";
983         bool meltdown_safe;
984
985         meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
986
987         /* Defer to CPU feature registers */
988         if (has_cpuid_feature(entry, scope))
989                 meltdown_safe = true;
990
991         if (!meltdown_safe)
992                 __meltdown_safe = false;
993
994         /*
995          * For reasons that aren't entirely clear, enabling KPTI on Cavium
996          * ThunderX leads to apparent I-cache corruption of kernel text, which
997          * ends as well as you might imagine. Don't even try.
998          */
999         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
1000                 str = "ARM64_WORKAROUND_CAVIUM_27456";
1001                 __kpti_forced = -1;
1002         }
1003
1004         /* Useful for KASLR robustness */
1005         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
1006                 if (!__kpti_forced) {
1007                         str = "KASLR";
1008                         __kpti_forced = 1;
1009                 }
1010         }
1011
1012         if (cpu_mitigations_off() && !__kpti_forced) {
1013                 str = "mitigations=off";
1014                 __kpti_forced = -1;
1015         }
1016
1017         if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
1018                 pr_info_once("kernel page table isolation disabled by kernel configuration\n");
1019                 return false;
1020         }
1021
1022         /* Forced? */
1023         if (__kpti_forced) {
1024                 pr_info_once("kernel page table isolation forced %s by %s\n",
1025                              __kpti_forced > 0 ? "ON" : "OFF", str);
1026                 return __kpti_forced > 0;
1027         }
1028
1029         return !meltdown_safe;
1030 }
1031
1032 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1033 static void
1034 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1035 {
1036         typedef void (kpti_remap_fn)(int, int, phys_addr_t);
1037         extern kpti_remap_fn idmap_kpti_install_ng_mappings;
1038         kpti_remap_fn *remap_fn;
1039
1040         static bool kpti_applied = false;
1041         int cpu = smp_processor_id();
1042
1043         /*
1044          * We don't need to rewrite the page-tables if either we've done
1045          * it already or we have KASLR enabled and therefore have not
1046          * created any global mappings at all.
1047          */
1048         if (kpti_applied || kaslr_offset() > 0)
1049                 return;
1050
1051         remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
1052
1053         cpu_install_idmap();
1054         remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
1055         cpu_uninstall_idmap();
1056
1057         if (!cpu)
1058                 kpti_applied = true;
1059
1060         return;
1061 }
1062 #else
1063 static void
1064 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1065 {
1066 }
1067 #endif  /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1068
1069 static int __init parse_kpti(char *str)
1070 {
1071         bool enabled;
1072         int ret = strtobool(str, &enabled);
1073
1074         if (ret)
1075                 return ret;
1076
1077         __kpti_forced = enabled ? 1 : -1;
1078         return 0;
1079 }
1080 early_param("kpti", parse_kpti);
1081
1082 #ifdef CONFIG_ARM64_HW_AFDBM
1083 static inline void __cpu_enable_hw_dbm(void)
1084 {
1085         u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
1086
1087         write_sysreg(tcr, tcr_el1);
1088         isb();
1089 }
1090
1091 static bool cpu_has_broken_dbm(void)
1092 {
1093         /* List of CPUs which have broken DBM support. */
1094         static const struct midr_range cpus[] = {
1095 #ifdef CONFIG_ARM64_ERRATUM_1024718
1096                 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 - r1p0
1097 #endif
1098                 {},
1099         };
1100
1101         return is_midr_in_range_list(read_cpuid_id(), cpus);
1102 }
1103
1104 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
1105 {
1106         return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
1107                !cpu_has_broken_dbm();
1108 }
1109
1110 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
1111 {
1112         if (cpu_can_use_dbm(cap))
1113                 __cpu_enable_hw_dbm();
1114 }
1115
1116 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
1117                        int __unused)
1118 {
1119         static bool detected = false;
1120         /*
1121          * DBM is a non-conflicting feature, i.e. the kernel can safely
1122          * run a mix of CPUs with and without the feature. So, we
1123          * unconditionally enable the capability to allow any late CPU
1124          * to use the feature. We only enable the control bits on the
1125          * CPU if it actually supports it.
1126          *
1127          * We have to make sure we print the "feature" detection only
1128          * when at least one CPU actually uses it. So check if this CPU
1129          * can actually use it and print the message exactly once.
1130          *
1131          * This is safe as all CPUs (including secondary CPUs - due to the
1132          * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
1133          * go through the "matches" check exactly once. Also if a CPU
1134          * matches the criteria, it is guaranteed that the CPU will turn
1135          * the DBM on, as the capability is unconditionally enabled.
1136          */
1137         if (!detected && cpu_can_use_dbm(cap)) {
1138                 detected = true;
1139                 pr_info("detected: Hardware dirty bit management\n");
1140         }
1141
1142         return true;
1143 }
1144
1145 #endif
1146
1147 #ifdef CONFIG_ARM64_VHE
1148 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
1149 {
1150         return is_kernel_in_hyp_mode();
1151 }
1152
1153 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
1154 {
1155         /*
1156          * Copy register values that aren't redirected by hardware.
1157          *
1158          * Before code patching, we only set tpidr_el1, all CPUs need to copy
1159          * this value to tpidr_el2 before we patch the code. Once we've done
1160          * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
1161          * do anything here.
1162          */
1163         if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
1164                 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1165 }
1166 #endif
1167
1168 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
1169 {
1170         u64 val = read_sysreg_s(SYS_CLIDR_EL1);
1171
1172         /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
1173         WARN_ON(val & (7 << 27 | 7 << 21));
1174 }
1175
1176 #ifdef CONFIG_ARM64_SSBD
1177 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
1178 {
1179         if (user_mode(regs))
1180                 return 1;
1181
1182         if (instr & BIT(PSTATE_Imm_shift))
1183                 regs->pstate |= PSR_SSBS_BIT;
1184         else
1185                 regs->pstate &= ~PSR_SSBS_BIT;
1186
1187         arm64_skip_faulting_instruction(regs, 4);
1188         return 0;
1189 }
1190
1191 static struct undef_hook ssbs_emulation_hook = {
1192         .instr_mask     = ~(1U << PSTATE_Imm_shift),
1193         .instr_val      = 0xd500401f | PSTATE_SSBS,
1194         .fn             = ssbs_emulation_handler,
1195 };
1196
1197 static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
1198 {
1199         static bool undef_hook_registered = false;
1200         static DEFINE_RAW_SPINLOCK(hook_lock);
1201
1202         raw_spin_lock(&hook_lock);
1203         if (!undef_hook_registered) {
1204                 register_undef_hook(&ssbs_emulation_hook);
1205                 undef_hook_registered = true;
1206         }
1207         raw_spin_unlock(&hook_lock);
1208
1209         if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
1210                 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
1211                 arm64_set_ssbd_mitigation(false);
1212         } else {
1213                 arm64_set_ssbd_mitigation(true);
1214         }
1215 }
1216 #endif /* CONFIG_ARM64_SSBD */
1217
1218 #ifdef CONFIG_ARM64_PAN
1219 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
1220 {
1221         /*
1222          * We modify PSTATE. This won't work from irq context as the PSTATE
1223          * is discarded once we return from the exception.
1224          */
1225         WARN_ON_ONCE(in_interrupt());
1226
1227         sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
1228         asm(SET_PSTATE_PAN(1));
1229 }
1230 #endif /* CONFIG_ARM64_PAN */
1231
1232 #ifdef CONFIG_ARM64_RAS_EXTN
1233 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1234 {
1235         /* Firmware may have left a deferred SError in this register. */
1236         write_sysreg_s(0, SYS_DISR_EL1);
1237 }
1238 #endif /* CONFIG_ARM64_RAS_EXTN */
1239
1240 #ifdef CONFIG_ARM64_PTR_AUTH
1241 static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
1242 {
1243         sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
1244                                        SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
1245 }
1246 #endif /* CONFIG_ARM64_PTR_AUTH */
1247
1248 #ifdef CONFIG_ARM64_PSEUDO_NMI
1249 static bool enable_pseudo_nmi;
1250
1251 static int __init early_enable_pseudo_nmi(char *p)
1252 {
1253         return strtobool(p, &enable_pseudo_nmi);
1254 }
1255 early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
1256
1257 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
1258                                    int scope)
1259 {
1260         return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
1261 }
1262 #endif
1263
1264 static const struct arm64_cpu_capabilities arm64_features[] = {
1265         {
1266                 .desc = "GIC system register CPU interface",
1267                 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1268                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1269                 .matches = has_useable_gicv3_cpuif,
1270                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1271                 .field_pos = ID_AA64PFR0_GIC_SHIFT,
1272                 .sign = FTR_UNSIGNED,
1273                 .min_field_value = 1,
1274         },
1275 #ifdef CONFIG_ARM64_PAN
1276         {
1277                 .desc = "Privileged Access Never",
1278                 .capability = ARM64_HAS_PAN,
1279                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1280                 .matches = has_cpuid_feature,
1281                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1282                 .field_pos = ID_AA64MMFR1_PAN_SHIFT,
1283                 .sign = FTR_UNSIGNED,
1284                 .min_field_value = 1,
1285                 .cpu_enable = cpu_enable_pan,
1286         },
1287 #endif /* CONFIG_ARM64_PAN */
1288 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
1289         {
1290                 .desc = "LSE atomic instructions",
1291                 .capability = ARM64_HAS_LSE_ATOMICS,
1292                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1293                 .matches = has_cpuid_feature,
1294                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1295                 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1296                 .sign = FTR_UNSIGNED,
1297                 .min_field_value = 2,
1298         },
1299 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
1300         {
1301                 .desc = "Software prefetching using PRFM",
1302                 .capability = ARM64_HAS_NO_HW_PREFETCH,
1303                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1304                 .matches = has_no_hw_prefetch,
1305         },
1306 #ifdef CONFIG_ARM64_UAO
1307         {
1308                 .desc = "User Access Override",
1309                 .capability = ARM64_HAS_UAO,
1310                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1311                 .matches = has_cpuid_feature,
1312                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1313                 .field_pos = ID_AA64MMFR2_UAO_SHIFT,
1314                 .min_field_value = 1,
1315                 /*
1316                  * We rely on stop_machine() calling uao_thread_switch() to set
1317                  * UAO immediately after patching.
1318                  */
1319         },
1320 #endif /* CONFIG_ARM64_UAO */
1321 #ifdef CONFIG_ARM64_PAN
1322         {
1323                 .capability = ARM64_ALT_PAN_NOT_UAO,
1324                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1325                 .matches = cpufeature_pan_not_uao,
1326         },
1327 #endif /* CONFIG_ARM64_PAN */
1328 #ifdef CONFIG_ARM64_VHE
1329         {
1330                 .desc = "Virtualization Host Extensions",
1331                 .capability = ARM64_HAS_VIRT_HOST_EXTN,
1332                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1333                 .matches = runs_at_el2,
1334                 .cpu_enable = cpu_copy_el2regs,
1335         },
1336 #endif  /* CONFIG_ARM64_VHE */
1337         {
1338                 .desc = "32-bit EL0 Support",
1339                 .capability = ARM64_HAS_32BIT_EL0,
1340                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1341                 .matches = has_cpuid_feature,
1342                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1343                 .sign = FTR_UNSIGNED,
1344                 .field_pos = ID_AA64PFR0_EL0_SHIFT,
1345                 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
1346         },
1347         {
1348                 .desc = "Kernel page table isolation (KPTI)",
1349                 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
1350                 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1351                 /*
1352                  * The ID feature fields below are used to indicate that
1353                  * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
1354                  * more details.
1355                  */
1356                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1357                 .field_pos = ID_AA64PFR0_CSV3_SHIFT,
1358                 .min_field_value = 1,
1359                 .matches = unmap_kernel_at_el0,
1360                 .cpu_enable = kpti_install_ng_mappings,
1361         },
1362         {
1363                 /* FP/SIMD is not implemented */
1364                 .capability = ARM64_HAS_NO_FPSIMD,
1365                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1366                 .min_field_value = 0,
1367                 .matches = has_no_fpsimd,
1368         },
1369 #ifdef CONFIG_ARM64_PMEM
1370         {
1371                 .desc = "Data cache clean to Point of Persistence",
1372                 .capability = ARM64_HAS_DCPOP,
1373                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1374                 .matches = has_cpuid_feature,
1375                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1376                 .field_pos = ID_AA64ISAR1_DPB_SHIFT,
1377                 .min_field_value = 1,
1378         },
1379         {
1380                 .desc = "Data cache clean to Point of Deep Persistence",
1381                 .capability = ARM64_HAS_DCPODP,
1382                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1383                 .matches = has_cpuid_feature,
1384                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1385                 .sign = FTR_UNSIGNED,
1386                 .field_pos = ID_AA64ISAR1_DPB_SHIFT,
1387                 .min_field_value = 2,
1388         },
1389 #endif
1390 #ifdef CONFIG_ARM64_SVE
1391         {
1392                 .desc = "Scalable Vector Extension",
1393                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1394                 .capability = ARM64_SVE,
1395                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1396                 .sign = FTR_UNSIGNED,
1397                 .field_pos = ID_AA64PFR0_SVE_SHIFT,
1398                 .min_field_value = ID_AA64PFR0_SVE,
1399                 .matches = has_cpuid_feature,
1400                 .cpu_enable = sve_kernel_enable,
1401         },
1402 #endif /* CONFIG_ARM64_SVE */
1403 #ifdef CONFIG_ARM64_RAS_EXTN
1404         {
1405                 .desc = "RAS Extension Support",
1406                 .capability = ARM64_HAS_RAS_EXTN,
1407                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1408                 .matches = has_cpuid_feature,
1409                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1410                 .sign = FTR_UNSIGNED,
1411                 .field_pos = ID_AA64PFR0_RAS_SHIFT,
1412                 .min_field_value = ID_AA64PFR0_RAS_V1,
1413                 .cpu_enable = cpu_clear_disr,
1414         },
1415 #endif /* CONFIG_ARM64_RAS_EXTN */
1416         {
1417                 .desc = "Data cache clean to the PoU not required for I/D coherence",
1418                 .capability = ARM64_HAS_CACHE_IDC,
1419                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1420                 .matches = has_cache_idc,
1421                 .cpu_enable = cpu_emulate_effective_ctr,
1422         },
1423         {
1424                 .desc = "Instruction cache invalidation not required for I/D coherence",
1425                 .capability = ARM64_HAS_CACHE_DIC,
1426                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1427                 .matches = has_cache_dic,
1428         },
1429         {
1430                 .desc = "Stage-2 Force Write-Back",
1431                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1432                 .capability = ARM64_HAS_STAGE2_FWB,
1433                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1434                 .sign = FTR_UNSIGNED,
1435                 .field_pos = ID_AA64MMFR2_FWB_SHIFT,
1436                 .min_field_value = 1,
1437                 .matches = has_cpuid_feature,
1438                 .cpu_enable = cpu_has_fwb,
1439         },
1440 #ifdef CONFIG_ARM64_HW_AFDBM
1441         {
1442                 /*
1443                  * Since we always turn this on, we don't want the user to
1444                  * think that the feature is available when it may not be.
1445                  * So hide the description.
1446                  *
1447                  * .desc = "Hardware pagetable Dirty Bit Management",
1448                  *
1449                  */
1450                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1451                 .capability = ARM64_HW_DBM,
1452                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1453                 .sign = FTR_UNSIGNED,
1454                 .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
1455                 .min_field_value = 2,
1456                 .matches = has_hw_dbm,
1457                 .cpu_enable = cpu_enable_hw_dbm,
1458         },
1459 #endif
1460         {
1461                 .desc = "CRC32 instructions",
1462                 .capability = ARM64_HAS_CRC32,
1463                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1464                 .matches = has_cpuid_feature,
1465                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1466                 .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
1467                 .min_field_value = 1,
1468         },
1469 #ifdef CONFIG_ARM64_SSBD
1470         {
1471                 .desc = "Speculative Store Bypassing Safe (SSBS)",
1472                 .capability = ARM64_SSBS,
1473                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1474                 .matches = has_cpuid_feature,
1475                 .sys_reg = SYS_ID_AA64PFR1_EL1,
1476                 .field_pos = ID_AA64PFR1_SSBS_SHIFT,
1477                 .sign = FTR_UNSIGNED,
1478                 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
1479                 .cpu_enable = cpu_enable_ssbs,
1480         },
1481 #endif
1482 #ifdef CONFIG_ARM64_CNP
1483         {
1484                 .desc = "Common not Private translations",
1485                 .capability = ARM64_HAS_CNP,
1486                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1487                 .matches = has_useable_cnp,
1488                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1489                 .sign = FTR_UNSIGNED,
1490                 .field_pos = ID_AA64MMFR2_CNP_SHIFT,
1491                 .min_field_value = 1,
1492                 .cpu_enable = cpu_enable_cnp,
1493         },
1494 #endif
1495         {
1496                 .desc = "Speculation barrier (SB)",
1497                 .capability = ARM64_HAS_SB,
1498                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1499                 .matches = has_cpuid_feature,
1500                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1501                 .field_pos = ID_AA64ISAR1_SB_SHIFT,
1502                 .sign = FTR_UNSIGNED,
1503                 .min_field_value = 1,
1504         },
1505 #ifdef CONFIG_ARM64_PTR_AUTH
1506         {
1507                 .desc = "Address authentication (architected algorithm)",
1508                 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
1509                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1510                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1511                 .sign = FTR_UNSIGNED,
1512                 .field_pos = ID_AA64ISAR1_APA_SHIFT,
1513                 .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
1514                 .matches = has_cpuid_feature,
1515                 .cpu_enable = cpu_enable_address_auth,
1516         },
1517         {
1518                 .desc = "Address authentication (IMP DEF algorithm)",
1519                 .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
1520                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1521                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1522                 .sign = FTR_UNSIGNED,
1523                 .field_pos = ID_AA64ISAR1_API_SHIFT,
1524                 .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
1525                 .matches = has_cpuid_feature,
1526                 .cpu_enable = cpu_enable_address_auth,
1527         },
1528         {
1529                 .desc = "Generic authentication (architected algorithm)",
1530                 .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
1531                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1532                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1533                 .sign = FTR_UNSIGNED,
1534                 .field_pos = ID_AA64ISAR1_GPA_SHIFT,
1535                 .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
1536                 .matches = has_cpuid_feature,
1537         },
1538         {
1539                 .desc = "Generic authentication (IMP DEF algorithm)",
1540                 .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
1541                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1542                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1543                 .sign = FTR_UNSIGNED,
1544                 .field_pos = ID_AA64ISAR1_GPI_SHIFT,
1545                 .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
1546                 .matches = has_cpuid_feature,
1547         },
1548 #endif /* CONFIG_ARM64_PTR_AUTH */
1549 #ifdef CONFIG_ARM64_PSEUDO_NMI
1550         {
1551                 /*
1552                  * Depends on having GICv3
1553                  */
1554                 .desc = "IRQ priority masking",
1555                 .capability = ARM64_HAS_IRQ_PRIO_MASKING,
1556                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1557                 .matches = can_use_gic_priorities,
1558                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1559                 .field_pos = ID_AA64PFR0_GIC_SHIFT,
1560                 .sign = FTR_UNSIGNED,
1561                 .min_field_value = 1,
1562         },
1563 #endif
1564         {},
1565 };
1566
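/* Helpers for describing the ELF hwcap entries below */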
1567 #define HWCAP_CPUID_MATCH(reg, field, s, min_value)                             \
1568                 .matches = has_cpuid_feature,                                   \
1569                 .sys_reg = reg,                                                 \
1570                 .field_pos = field,                                             \
1571                 .sign = s,                                                      \
1572                 .min_field_value = min_value,
1573
1574 #define __HWCAP_CAP(name, cap_type, cap)                                        \
1575                 .desc = name,                                                   \
1576                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,                            \
1577                 .hwcap_type = cap_type,                                         \
1578                 .hwcap = cap,                                                   \
1579
1580 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)                      \
1581         {                                                                       \
1582                 __HWCAP_CAP(#cap, cap_type, cap)                                \
1583                 HWCAP_CPUID_MATCH(reg, field, s, min_value)                     \
1584         }
1585
1586 #define HWCAP_MULTI_CAP(list, cap_type, cap)                                    \
1587         {                                                                       \
1588                 __HWCAP_CAP(#cap, cap_type, cap)                                \
1589                 .matches = cpucap_multi_entry_cap_matches,                      \
1590                 .match_list = list,                                             \
1591         }
1592
1593 #ifdef CONFIG_ARM64_PTR_AUTH
1594 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
1595         {
1596                 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
1597                                   FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
1598         },
1599         {
1600                 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
1601                                   FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
1602         },
1603         {},
1604 };
1605
1606 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
1607         {
1608                 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
1609                                   FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
1610         },
1611         {
1612                 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
1613                                   FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
1614         },
1615         {},
1616 };
1617 #endif
1618
1619 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1620         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
1621         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
1622         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
1623         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
1624         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
1625         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
1626         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
1627         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
1628         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
1629         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
1630         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
1631         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
1632         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
1633         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
1634         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
1635         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
1636         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
1637         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
1638         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
1639         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
1640         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
1641         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
1642         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
1643         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
1644         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
1645         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
1646         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
1647         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
1648         HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
1649 #ifdef CONFIG_ARM64_SVE
1650         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
1651         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
1652         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
1653         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
1654         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
1655         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
1656         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
1657 #endif
1658         HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
1659 #ifdef CONFIG_ARM64_PTR_AUTH
1660         HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
1661         HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
1662 #endif
1663         {},
1664 };
1665
1666 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1667 #ifdef CONFIG_COMPAT
1668         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1669         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1670         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1671         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1672         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1673 #endif
1674         {},
1675 };
1676
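/* Record a detected hwcap in the native or compat ELF hwcap mask */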
1677 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1678 {
1679         switch (cap->hwcap_type) {
1680         case CAP_HWCAP:
1681                 cpu_set_feature(cap->hwcap);
1682                 break;
1683 #ifdef CONFIG_COMPAT
1684         case CAP_COMPAT_HWCAP:
1685                 compat_elf_hwcap |= (u32)cap->hwcap;
1686                 break;
1687         case CAP_COMPAT_HWCAP2:
1688                 compat_elf_hwcap2 |= (u32)cap->hwcap;
1689                 break;
1690 #endif
1691         default:
1692                 WARN_ON(1);
1693                 break;
1694         }
1695 }
1696
1697 /* Check if we have a particular HWCAP enabled */
1698 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1699 {
1700         bool rc;
1701
1702         switch (cap->hwcap_type) {
1703         case CAP_HWCAP:
1704                 rc = cpu_have_feature(cap->hwcap);
1705                 break;
1706 #ifdef CONFIG_COMPAT
1707         case CAP_COMPAT_HWCAP:
1708                 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1709                 break;
1710         case CAP_COMPAT_HWCAP2:
1711                 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1712                 break;
1713 #endif
1714         default:
1715                 WARN_ON(1);
1716                 rc = false;
1717         }
1718
1719         return rc;
1720 }
1721
1722 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1723 {
1724         /* We support emulation of accesses to CPU ID feature registers */
1725         cpu_set_named_feature(CPUID);
1726         for (; hwcaps->matches; hwcaps++)
1727                 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
1728                         cap_set_elf_hwcap(hwcaps);
1729 }
1730
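/*
 * Detect the capabilities within @scope_mask that this CPU supports and
 * record them in cpu_hwcaps. Capabilities detected with SCOPE_BOOT_CPU
 * are also recorded in boot_capabilities.
 */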
1731 static void update_cpu_capabilities(u16 scope_mask)
1732 {
1733         int i;
1734         const struct arm64_cpu_capabilities *caps;
1735
1736         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1737         for (i = 0; i < ARM64_NCAPS; i++) {
1738                 caps = cpu_hwcaps_ptrs[i];
1739                 if (!caps || !(caps->type & scope_mask) ||
1740                     cpus_have_cap(caps->capability) ||
1741                     !caps->matches(caps, cpucap_default_scope(caps)))
1742                         continue;
1743
1744                 if (caps->desc)
1745                         pr_info("detected: %s\n", caps->desc);
1746                 cpus_set_cap(caps->capability);
1747
1748                 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
1749                         set_bit(caps->capability, boot_capabilities);
1750         }
1751 }
1752
1753 /*
1754  * Enable all the available capabilities on this CPU. The capabilities
1755  * with BOOT_CPU scope are handled separately and hence skipped here.
1756  */
1757 static int cpu_enable_non_boot_scope_capabilities(void *__unused)
1758 {
1759         int i;
1760         u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
1761
1762         for_each_available_cap(i) {
1763                 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
1764
1765                 if (WARN_ON(!cap))
1766                         continue;
1767
1768                 if (!(cap->type & non_boot_scope))
1769                         continue;
1770
1771                 if (cap->cpu_enable)
1772                         cap->cpu_enable(cap);
1773         }
1774         return 0;
1775 }
1776
1777 /*
1778  * Run through the enabled capabilities and enable() them on all active
1779  * CPUs.
1780  */
1781 static void __init enable_cpu_capabilities(u16 scope_mask)
1782 {
1783         int i;
1784         const struct arm64_cpu_capabilities *caps;
1785         bool boot_scope;
1786
1787         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1788         boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
1789
1790         for (i = 0; i < ARM64_NCAPS; i++) {
1791                 unsigned int num;
1792
1793                 caps = cpu_hwcaps_ptrs[i];
1794                 if (!caps || !(caps->type & scope_mask))
1795                         continue;
1796                 num = caps->capability;
1797                 if (!cpus_have_cap(num))
1798                         continue;
1799
1800                 /* Ensure cpus_have_const_cap(num) works */
1801                 static_branch_enable(&cpu_hwcap_keys[num]);
1802
1803                 if (boot_scope && caps->cpu_enable)
1804                         /*
1805                          * Capabilities with SCOPE_BOOT_CPU scope are finalised
1806                          * before any secondary CPU boots. Thus, each secondary
1807                          * will enable the capability as appropriate via
1808                          * check_local_cpu_capabilities(). The only exception is
1809                          * the boot CPU, for which the capability must be
1810                          * enabled here. This approach avoids costly
1811                          * stop_machine() calls for this case.
1812                          */
1813                         caps->cpu_enable(caps);
1814         }
1815
1816         /*
1817          * For all non-boot scope capabilities, use stop_machine()
1818          * as it schedules the work allowing us to modify PSTATE,
1819          * instead of on_each_cpu() which uses an IPI, giving us a
1820          * PSTATE that disappears when we return.
1821          */
1822         if (!boot_scope)
1823                 stop_machine(cpu_enable_non_boot_scope_capabilities,
1824                              NULL, cpu_online_mask);
1825 }
1826
1827 /*
1828  * Run through the list of capabilities to check for conflicts.
1829  * If the system has already detected a capability, take necessary
1830  * action on this CPU.
1831  *
1832  * Returns "false" on conflicts.
1833  */
1834 static bool verify_local_cpu_caps(u16 scope_mask)
1835 {
1836         int i;
1837         bool cpu_has_cap, system_has_cap;
1838         const struct arm64_cpu_capabilities *caps;
1839
1840         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1841
1842         for (i = 0; i < ARM64_NCAPS; i++) {
1843                 caps = cpu_hwcaps_ptrs[i];
1844                 if (!caps || !(caps->type & scope_mask))
1845                         continue;
1846
1847                 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
1848                 system_has_cap = cpus_have_cap(caps->capability);
1849
1850                 if (system_has_cap) {
1851                         /*
1852                          * Check if the new CPU misses an advertised feature,
1853                          * which is not safe to miss.
1854                          */
1855                         if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
1856                                 break;
1857                         /*
1858                          * We have to issue cpu_enable() irrespective of
1859                          * whether the CPU has it or not, as it is enabled
1860                          * system-wide. It is up to the callback to take
1861                          * appropriate action on this CPU.
1862                          */
1863                         if (caps->cpu_enable)
1864                                 caps->cpu_enable(caps);
1865                 } else {
1866                         /*
1867                          * Check if the CPU has this capability when it is not
1868                          * safe for a late CPU to have it while the system doesn't.
1869                          */
1870                         if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
1871                                 break;
1872                 }
1873         }
1874
1875         if (i < ARM64_NCAPS) {
1876                 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
1877                         smp_processor_id(), caps->capability,
1878                         caps->desc, system_has_cap, cpu_has_cap);
1879                 return false;
1880         }
1881
1882         return true;
1883 }
1884
1885 /*
1886  * Check for CPU features that are used in early boot
1887  * based on the Boot CPU value.
1888  */
1889 static void check_early_cpu_features(void)
1890 {
1891         verify_cpu_asid_bits();
1892         /*
1893          * Early features are used by the kernel already. If there
1894          * is a conflict, we cannot proceed further.
1895          */
1896         if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
1897                 cpu_panic_kernel();
1898 }
1899
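/* Park a late CPU that is missing an ELF hwcap already advertised to userspace */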
1900 static void
1901 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1902 {
1903
1904         for (; caps->matches; caps++)
1905                 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1906                         pr_crit("CPU%d: missing HWCAP: %s\n",
1907                                         smp_processor_id(), caps->desc);
1908                         cpu_die_early();
1909                 }
1910 }
1911
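/* Park this CPU if its SVE vector lengths do not cover the system-wide safe set */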
1912 static void verify_sve_features(void)
1913 {
1914         u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
1915         u64 zcr = read_zcr_features();
1916
1917         unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
1918         unsigned int len = zcr & ZCR_ELx_LEN_MASK;
1919
1920         if (len < safe_len || sve_verify_vq_map()) {
1921                 pr_crit("CPU%d: SVE: vector length support mismatch\n",
1922                         smp_processor_id());
1923                 cpu_die_early();
1924         }
1925
1926         /* Add checks on other ZCR bits here if necessary */
1927 }
1928
1929
1930 /*
1931  * Run through the enabled system capabilities and enable() them on this CPU.
1932  * The capabilities were decided based on the available CPUs at boot time.
1933  * Any new CPU should match the system-wide status of the capability. If the
1934  * new CPU doesn't have a capability which the system now has enabled, we
1935  * cannot do anything to fix it up and could cause unexpected failures. So
1936  * we park the CPU.
1937  */
1938 static void verify_local_cpu_capabilities(void)
1939 {
1940         /*
1941          * The capabilities with SCOPE_BOOT_CPU are checked from
1942          * check_early_cpu_features(), as they need to be verified
1943          * on all secondary CPUs.
1944          */
1945         if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
1946                 cpu_die_early();
1947
1948         verify_local_elf_hwcaps(arm64_elf_hwcaps);
1949
1950         if (system_supports_32bit_el0())
1951                 verify_local_elf_hwcaps(compat_elf_hwcaps);
1952
1953         if (system_supports_sve())
1954                 verify_sve_features();
1955 }
1956
1957 void check_local_cpu_capabilities(void)
1958 {
1959         /*
1960          * All secondary CPUs should conform to the early CPU features
1961          * in use by the kernel, based on the boot CPU.
1962          */
1963         check_early_cpu_features();
1964
1965         /*
1966          * If we haven't finalised the system capabilities, this CPU gets
1967          * a chance to update the errata workarounds and local features.
1968          * Otherwise, this CPU should verify that it has all the
1969          * capabilities advertised by the system.
1970          */
1971         if (!sys_caps_initialised)
1972                 update_cpu_capabilities(SCOPE_LOCAL_CPU);
1973         else
1974                 verify_local_cpu_capabilities();
1975 }
1976
1977 static void __init setup_boot_cpu_capabilities(void)
1978 {
1979         /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
1980         update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
1981         /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
1982         enable_cpu_capabilities(SCOPE_BOOT_CPU);
1983 }
1984
1985 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1986 EXPORT_SYMBOL(arm64_const_caps_ready);
1987
1988 static void __init mark_const_caps_ready(void)
1989 {
1990         static_branch_enable(&arm64_const_caps_ready);
1991 }
1992
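/*
 * Check whether the current CPU has capability @n by re-running its
 * matches() callback at local scope. Must be called with preemption
 * disabled.
 */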
1993 bool this_cpu_has_cap(unsigned int n)
1994 {
1995         if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
1996                 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
1997
1998                 if (cap)
1999                         return cap->matches(cap, SCOPE_LOCAL_CPU);
2000         }
2001
2002         return false;
2003 }
2004
2005 void cpu_set_feature(unsigned int num)
2006 {
2007         WARN_ON(num >= MAX_CPU_FEATURES);
2008         elf_hwcap |= BIT(num);
2009 }
2010 EXPORT_SYMBOL_GPL(cpu_set_feature);
2011
2012 bool cpu_have_feature(unsigned int num)
2013 {
2014         WARN_ON(num >= MAX_CPU_FEATURES);
2015         return elf_hwcap & BIT(num);
2016 }
2017 EXPORT_SYMBOL_GPL(cpu_have_feature);
2018
2019 unsigned long cpu_get_elf_hwcap(void)
2020 {
2021         /*
2022          * We currently only populate the first 32 bits of AT_HWCAP. Please
2023          * note that for userspace compatibility we guarantee that bits 62
2024          * and 63 will always be returned as 0.
2025          */
2026         return lower_32_bits(elf_hwcap);
2027 }
2028
2029 unsigned long cpu_get_elf_hwcap2(void)
2030 {
2031         return upper_32_bits(elf_hwcap);
2032 }
2033
2034 static void __init setup_system_capabilities(void)
2035 {
2036         /*
2037          * We have finalised the system-wide safe feature
2038          * registers, so finalise the capabilities that depend
2039          * on them. Also enable all the available capabilities
2040          * that are not already enabled.
2041          */
2042         update_cpu_capabilities(SCOPE_SYSTEM);
2043         enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2044 }
2045
2046 void __init setup_cpu_features(void)
2047 {
2048         u32 cwg;
2049
2050         setup_system_capabilities();
2051         mark_const_caps_ready();
2052         setup_elf_hwcaps(arm64_elf_hwcaps);
2053
2054         if (system_supports_32bit_el0())
2055                 setup_elf_hwcaps(compat_elf_hwcaps);
2056
2057         if (system_uses_ttbr0_pan())
2058                 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
2059
2060         sve_setup();
2061         minsigstksz_setup();
2062
2063         /* Advertise that we have computed the system capabilities */
2064         set_sys_caps_initialised();
2065
2066         /*
2067          * Check for sane CTR_EL0.CWG value.
2068          */
2069         cwg = cache_type_cwg();
2070         if (!cwg)
2071                 pr_warn("No Cache Writeback Granule information, assuming %d\n",
2072                         ARCH_DMA_MINALIGN);
2073 }
2074
2075 static bool __maybe_unused
2076 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
2077 {
2078         return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
2079 }
2080
2081 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
2082 {
2083         cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
2084 }
2085
2086 /*
2087  * We emulate only the following system register space.
2088  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
2089  * See Table C5-6 System instruction encodings for System register accesses,
2090  * ARMv8 ARM(ARM DDI 0487A.f) for more details.
2091  * ARMv8 ARM (ARM DDI 0487A.f) for more details.
2092 static inline bool __attribute_const__ is_emulated(u32 id)
2093 {
2094         return (sys_reg_Op0(id) == 0x3 &&
2095                 sys_reg_CRn(id) == 0x0 &&
2096                 sys_reg_Op1(id) == 0x0 &&
2097                 (sys_reg_CRm(id) == 0 ||
2098                  ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
2099 }
2100
2101 /*
2102  * With CRm == 0, reg should be one of:
2103  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
2104  */
2105 static inline int emulate_id_reg(u32 id, u64 *valp)
2106 {
2107         switch (id) {
2108         case SYS_MIDR_EL1:
2109                 *valp = read_cpuid_id();
2110                 break;
2111         case SYS_MPIDR_EL1:
2112                 *valp = SYS_MPIDR_SAFE_VAL;
2113                 break;
2114         case SYS_REVIDR_EL1:
2115                 /* IMPLEMENTATION DEFINED values are emulated with 0 */
2116                 *valp = 0;
2117                 break;
2118         default:
2119                 return -EINVAL;
2120         }
2121
2122         return 0;
2123 }
2124
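/* Compute the value to be returned for an emulated MRS read of @id */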
2125 static int emulate_sys_reg(u32 id, u64 *valp)
2126 {
2127         struct arm64_ftr_reg *regp;
2128
2129         if (!is_emulated(id))
2130                 return -EINVAL;
2131
2132         if (sys_reg_CRm(id) == 0)
2133                 return emulate_id_reg(id, valp);
2134
2135         regp = get_arm64_ftr_reg(id);
2136         if (regp)
2137                 *valp = arm64_ftr_reg_user_value(regp);
2138         else
2139                 /*
2140                  * The untracked registers are either IMPLEMENTATION DEFINED
2141                  * (e.g, ID_AFR0_EL1) or reserved RAZ.
2142                  */
2143                 *valp = 0;
2144         return 0;
2145 }
2146
2147 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
2148 {
2149         int rc;
2150         u64 val;
2151
2152         rc = emulate_sys_reg(sys_reg, &val);
2153         if (!rc) {
2154                 pt_regs_write_reg(regs, rt, val);
2155                 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
2156         }
2157         return rc;
2158 }
2159
2160 static int emulate_mrs(struct pt_regs *regs, u32 insn)
2161 {
2162         u32 sys_reg, rt;
2163
2164         /*
2165          * sys_reg values are defined as used in the mrs/msr instructions,
2166          * so shift the imm value to get the encoding.
2167          */
2168         sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
2169         rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
2170         return do_emulate_mrs(regs, sys_reg, rt);
2171 }
2172
2173 static struct undef_hook mrs_hook = {
2174         .instr_mask = 0xfff00000,
2175         .instr_val  = 0xd5300000,
2176         .pstate_mask = PSR_AA32_MODE_MASK,
2177         .pstate_val = PSR_MODE_EL0t,
2178         .fn = emulate_mrs,
2179 };
2180
2181 static int __init enable_mrs_emulation(void)
2182 {
2183         register_undef_hook(&mrs_hook);
2184         return 0;
2185 }
2186
2187 core_initcall(enable_mrs_emulation);
2188
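/* Report Meltdown status via /sys/devices/system/cpu/vulnerabilities/meltdown */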
2189 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
2190                           char *buf)
2191 {
2192         if (__meltdown_safe)
2193                 return sprintf(buf, "Not affected\n");
2194
2195         if (arm64_kernel_unmapped_at_el0())
2196                 return sprintf(buf, "Mitigation: PTI\n");
2197
2198         return sprintf(buf, "Vulnerable\n");
2199 }