/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#define MAX_CPU_FEATURES	64
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,	/* Use a predefined safe value */
	FTR_LOWER_SAFE,	/* Smaller value is safe */
	FTR_HIGHER_SAFE,/* Bigger value is safe */
};

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
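
/*
 * Illustrative sketch (not part of this header): a feature register is
 * described by an array of arm64_ftr_bits, one entry per ID register
 * field, terminated by an empty entry. The field placement below is
 * hypothetical, purely for illustration.
 */
#if 0
static const struct arm64_ftr_bits ftr_example[] = {
	{
		/* 4-bit unsigned field at bits [23:20]; lowest value is safe */
		.sign		= FTR_UNSIGNED,
		.visible	= FTR_HIDDEN,
		.strict		= FTR_STRICT,
		.type		= FTR_LOWER_SAFE,
		.shift		= 20,
		.width		= 4,
		.safe_val	= 0,
	},
	{},	/* sentinel: zero width terminates the table */
};
#endif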

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata work
 * arounds (both used internally by the kernel and tracked in cpu_hwcaps) and
 * ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes :
 *
 * 1) Scope of Detection : The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g, checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a call back ( @matches() ) to
 *    perform the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *        matches. This implies we have to run the check on all the
 *        booting CPUs, until the system decides that the state of the
 *        capability is finalised. (See section 2 below.)
 *
 *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *        match. This implies we run the check only once, when the
 *        system decides to finalise the state of the capability. If the
 *        capability relies on a field in one of the CPU ID feature
 *        registers, we use the sanitised value of the register from the
 *        CPU feature infrastructure to make the decision.
 *
 *     c) SCOPE_BOOT_CPU: check only on the primary boot CPU to detect the
 *        feature. This category is for features that are "finalised"
 *        (or used) by the kernel very early, even before the SMP CPUs
 *        are brought up.
 *
 *    The process of detection is usually denoted by "updating" the
 *    capability state in the code.
 *
 * 2) Finalise the state : The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make
 *    better decisions based on the available set of CPUs. However, there
 *    are some special cases where the action is taken during early
 *    boot by the primary boot CPU (e.g, running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the capability
 *    and takes any action, as it may be impossible to execute the actions
 *    safely. A CPU brought up after a capability is "finalised" is
 *    referred to as a "late CPU" w.r.t the capability. e.g, all secondary
 *    CPUs are treated as "late CPUs" for capabilities determined by the
 *    boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *      a) Boot CPU scope capabilities - Finalised by the primary boot CPU
 *         via setup_boot_cpu_capabilities().
 *      b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g, by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU, by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via :
 *
 *	secondary_start_kernel()-> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots:
 *
 *	a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
 *	   except for the primary boot CPU.
 *
 *	b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *	   user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action, based on the
 *    severity (e.g, a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability, if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g, SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a later
 *    point. The actions are always initiated only after the capability
 *    is finalised. This is usually denoted by "enabling" the capability.
 *    The actions are initiated as follows :
 *
 *	a) Action is triggered on all online CPUs, after the capability is
 *	   finalised, invoked within the stop_machine() context from
 *	   enable_cpu_capabilities().
 *
 *	b) For any late CPU brought up after (1), the action is triggered
 *	   via:
 *
 *	  check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations :
 *
 *		x-----------------------------x
 *		| Type  | System   | Late CPU |
 *		|-----------------------------|
 *		|   a   |   y      |    n     |
 *		|-----------------------------|
 *		|   b   |   n      |    y     |
 *		x-----------------------------x
 *
 *     Two separate flag bits are defined to indicate whether each kind of
 *     conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *     Case (a) is not permitted for a capability that the system requires
 *     all CPUs to have in order for the capability to be enabled. This is
 *     typical for capabilities that represent enhanced functionality.
 *
 *     Case (b) is not permitted for a capability that must be enabled
 *     during boot if any CPU in the system requires it in order to run
 *     safely. This is typical for erratum work arounds that cannot be
 *     enabled after the corresponding capability is finalised.
 *
 *     In some non-typical cases, either both (a) and (b), or neither,
 *     should be permitted. This can be described by including neither
 *     or both flags in the capability's type field.
 */

/*
 * Decide how the capability is detected.
 * On any local CPU vs System wide vs the primary boot CPU
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot. i.e, the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM	ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU	ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU	ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL	ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it ?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))

/* Is it safe for a late CPU to miss this capability when the system has it ? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system requires it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to enable this capability for this CPU.
	 * For each successfully booted CPU, this method is called for each
	 * globally detected capability.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};

		/*
		 * An optional list of "matches/cpu_enable" pairs for the same
		 * "capability" of the same "type" as described by the parent.
		 * Only matches(), cpu_enable() and fields relevant to these
		 * methods are significant in the list. The cpu_enable is
		 * invoked only if the corresponding entry "matches()".
		 * However, if a cpu_enable() method is associated
		 * with multiple matches(), care should be taken that either
		 * the match criteria are mutually exclusive, or that the
		 * method is robust against being called multiple times.
		 */
		const struct arm64_cpu_capabilities *match_list;
	};
};
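
/*
 * Illustrative sketch (not part of this header): a minimal capability
 * table entry using the feature register checking fields above. The
 * capability name ARM64_EXAMPLE_CAP and the use of the SVE field are
 * hypothetical, purely for illustration.
 */
#if 0
static bool has_example_feature(const struct arm64_cpu_capabilities *cap,
				int scope)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/* Extract the 4-bit unsigned field and compare with the minimum */
	return cpuid_feature_extract_unsigned_field(pfr0, cap->field_pos) >=
	       cap->min_field_value;
}

static const struct arm64_cpu_capabilities example_cap = {
	.desc			= "Example system feature",
	.capability		= ARM64_EXAMPLE_CAP,	/* hypothetical */
	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
	.matches		= has_example_feature,
	.sys_reg		= SYS_ID_AA64PFR0_EL1,
	.field_pos		= ID_AA64PFR0_SVE_SHIFT,
	.min_field_value	= 1,
	.sign			= FTR_UNSIGNED,
};
#endif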

static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

static inline bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
{
	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
}

static inline bool
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
{
	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}

/*
 * Generic helper for handling capabilities with multiple (match,enable) pairs
 * of call backs, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static inline void
cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}
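
/*
 * Illustrative sketch (not part of this header): a parent capability
 * whose matches()/cpu_enable() delegate to a match_list, so that either
 * of two variants sets the same capability bit. All names containing
 * "example" or "variant" are hypothetical.
 */
#if 0
static const struct arm64_cpu_capabilities example_variants[] = {
	{
		.matches	= has_variant_a,	/* hypothetical */
		.cpu_enable	= enable_variant_a,	/* hypothetical */
	},
	{
		.matches	= has_variant_b,	/* hypothetical */
		.cpu_enable	= enable_variant_b,	/* hypothetical */
	},
	{},	/* sentinel: a NULL matches() terminates the list */
};

static const struct arm64_cpu_capabilities example_multi_cap = {
	.desc		= "Example multi-entry capability",
	.capability	= ARM64_EXAMPLE_CAP,		/* hypothetical */
	.type		= ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	.matches	= cpucap_multi_entry_cap_matches,
	.cpu_enable	= cpucap_multi_entry_cap_cpu_enable,
	.match_list	= example_variants,
};
#endif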

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

/* ARM64 CAPS + alternative_cb */
#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)

bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
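
/*
 * For example (illustrative only), cpu_have_named_feature(SHA2) expands
 * to cpu_have_feature(KERNEL_HWCAP_SHA2) via the cpu_feature() macro.
 */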

/* System capability check for constant caps */
static __always_inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}

static __always_inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}

static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
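
/*
 * Worked example: with features = 0x0000000000100000 (bit 20 set) and a
 * 4-bit unsigned field at shift 20, the field is shifted up by
 * 64 - 4 - 20 = 40 bits and back down by 64 - 4 = 60 bits, yielding 0x1.
 */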

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

	return val > 0;
}

void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool system_supports_32bit_el0(void)
{
	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN4_SHIFT);

	return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
}

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN64_SHIFT);

	return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN16_SHIFT);

	return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_BIGENDEL_SHIFT);

	return val == 0x1;
}

static inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}

static inline bool system_supports_cnp(void)
{
	return IS_ENABLED(CONFIG_ARM64_CNP) &&
		cpus_have_const_cap(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		(cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
		 cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
}

static inline bool system_supports_generic_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		(cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
		 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
}

static inline bool system_uses_irq_prio_masking(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}

#define ARM64_SSBD_UNKNOWN		-1
#define ARM64_SSBD_FORCE_DISABLE	0
#define ARM64_SSBD_KERNEL		1
#define ARM64_SSBD_FORCE_ENABLE		2
#define ARM64_SSBD_MITIGATED		3

static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;
	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}

void arm64_set_ssbd_mitigation(bool state);

extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);

static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5: return 48;
	case 6: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by the "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
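
/*
 * For example, a PARange value of 5 encodes a 48-bit physical address
 * range, so id_aa64mmfr0_parange_to_phys_shift(5) returns 48.
 */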

#endif /* __ASSEMBLY__ */

#endif