/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/alternative-macros.h>
#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#define MAX_CPU_FEATURES	128
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field depends on the implications of
 * the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types: LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for a
 * field when EXACT is specified; failing that, the safe value specified in
 * the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
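
/*
 * For example (an illustrative sketch, not code from this file): if one CPU
 * reports 2 for a 4-bit unsigned field and another reports 1, the sanitised
 * system-wide value becomes:
 *
 *	safe = min(val_cpu0, val_cpu1);		// FTR_LOWER_SAFE  -> 1
 *	safe = max(val_cpu0, val_cpu1);		// FTR_HIGHER_SAFE -> 2
 *
 * For FTR_EXACT, a mismatch makes the field fall back to the predefined
 * safe_val from the table instead.
 */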

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};

/*
 * Describe the early feature override to the core override code:
 *
 * @val			Values that are to be merged into the final
 *			sanitised value of the register. Only the bitfields
 *			set to 1 in @mask are valid.
 * @mask		Mask of the features that are overridden by @val.
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
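
/*
 * The three encodings above, sketched for a single 4-bit field at bit 0
 * (the values are illustrative only):
 *
 *	{ .val = 0x1, .mask = 0xf }	// valid override: force field to 1
 *	{ .val = 0x0, .mask = 0x0 }	// no override for this field
 *	{ .val = 0xf, .mask = 0x0 }	// invalid (unhonoured) override
 */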

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in
 * system_cpucaps) and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection: The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback (@matches()) to perform
 *    the check. Scope defines how the checks should be performed. There
 *    are three cases:
 *
 *    a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" the capability
 *       if at least one CPU matches. This implies that we have to run
 *       the check on all the booting CPUs, until the system decides that
 *       the state of the capability is finalised. (See section 2 below.)
 *
 *    b) SCOPE_SYSTEM: check all the CPUs and "detect" the capability
 *       only if all the CPUs match. This implies that we run the check
 *       only once, when the system decides to finalise the state of the
 *       capability. If the capability relies on a field in one of the
 *       CPU ID feature registers, we use the sanitised value of the
 *       register from the CPU feature infrastructure to make the decision.
 *
 *    c) SCOPE_BOOT_CPU: check only on the primary boot CPU to detect the
 *       feature. This category is for features that are "finalised" (or
 *       used) by the kernel very early, even before the SMP CPUs are
 *       brought up.
 *
 *    The process of detection is usually denoted by "updating" the
 *    capability state in the code.
 *
 * 2) Finalise the state: The kernel should finalise the state of a
 *    capability at some point during its execution and take any necessary
 *    actions. Usually, this is done after all the boot-time enabled CPUs
 *    are brought up by the kernel, so that it can make a better decision
 *    based on the available set of CPUs. However, there are some special
 *    cases where the action is taken during early boot by the primary
 *    boot CPU (e.g., running the kernel at EL2 with Virtualisation Host
 *    Extensions). The kernel usually disallows any changes to the state
 *    of a capability once it finalises the capability and takes any
 *    action, as it may be impossible to execute the actions safely. A CPU
 *    brought up after a capability is "finalised" is referred to as a
 *    "late CPU" w.r.t. the capability. e.g., all secondary CPUs are
 *    treated as "late CPUs" for capabilities determined by the boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *    a) Boot CPU scope capabilities - Finalised by the primary boot CPU
 *       via setup_boot_cpu_capabilities().
 *    b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g., by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU, by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *	secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots:
 *
 *    a) SCOPE_BOOT_CPU: all CPUs are verified against the capability,
 *       except for the primary boot CPU.
 *
 *    b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: all CPUs hotplugged on by the
 *       user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action, based on the
 *    severity (e.g., a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability, if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *
 *	a) Action is triggered on all online CPUs, after the capability is
 *	   finalised, invoked within the stop_machine() context from
 *	   enable_cpu_capabilities().
 *
 *	b) For any late CPU, brought up after (a), the action is triggered
 *	   via:
 *
 *	  check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *		x-----------------------------x
 *		| Type | System   | Late CPU  |
 *		|-----------------------------|
 *		|  a   |   y      |    n      |
 *		|-----------------------------|
 *		|  b   |   n      |    y      |
 *		x-----------------------------x
 *
 *    Two separate flag bits are defined to indicate whether each kind of
 *    conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	- Case (a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	- Case (b) is allowed
 *
 *    Case (a) is not permitted for a capability that the system requires
 *    all CPUs to have in order for the capability to be enabled. This is
 *    typical for capabilities that represent enhanced functionality.
 *
 *    Case (b) is not permitted for a capability that must be enabled
 *    during boot if any CPU in the system requires it in order to run
 *    safely. This is typical for erratum workarounds that cannot be
 *    enabled after the corresponding capability is finalised.
 *
 *    In some non-typical cases, either both (a) and (b), or neither,
 *    should be permitted. This can be described by including neither or
 *    both flags in the capability's type field.
 *
 *    In case of a conflict, the CPU is prevented from booting. If the
 *    ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
 *    then a kernel panic is triggered.
 */

/*
 * Decide how the capability is detected:
 * on any local CPU vs. system wide vs. the primary boot CPU.
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot. i.e., the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	| \
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	| \
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM	ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU	ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU	ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL	ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system require it. When one of these capabilities has
 * been enabled, it is safe to allow any CPU to boot that doesn't require
 * the workaround. However, it is not safe if a "late" CPU requires a
 * workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though the
 * system hasn't enabled it, although the feature will not be used by
 * Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more
 * CPUs. All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	| \
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	| \
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU is
 * not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU.
 * In case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)

/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
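
/*
 * As a sketch of how the composite types above map onto the conflict
 * table in section 5 (illustrative, not a definition from this file):
 * ARM64_CPUCAP_LOCAL_CPU_ERRATUM allows case (a) but not case (b),
 * whereas ARM64_CPUCAP_SYSTEM_FEATURE allows case (b) but not case (a).
 */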

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g., working around CPU
	 * errata), where all the CPUs must take some action (e.g.,
	 * changing the system control/configuration). Thus, if an
	 * action is required only if the CPU has the capability, then
	 * the routine must check for it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 field_width;
			u8 min_field_value;
			u8 max_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
		/*
		 * An optional list of "matches/cpu_enable" pairs for the
		 * same "capability" of the same "type" as described by the
		 * parent. Only matches(), cpu_enable() and fields relevant
		 * to these methods are significant in the list. The
		 * cpu_enable is invoked only if the corresponding entry
		 * "matches()". However, if a cpu_enable() method is
		 * associated with multiple matches(), care should be taken
		 * that either the match criteria are mutually exclusive,
		 * or that the method is robust against being called
		 * multiple times.
		 */
		const struct arm64_cpu_capabilities *match_list;
		const struct cpumask *cpus;
	};
};
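
/*
 * An illustrative sketch of a feature-register based entry (the real
 * tables live in kernel/cpufeature.c; has_cpuid_feature() and
 * cpu_enable_pan() are assumed to be visible in that file):
 *
 *	{
 *		.desc = "Privileged Access Never",
 *		.capability = ARM64_HAS_PAN,
 *		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches = has_cpuid_feature,
 *		.sys_reg = SYS_ID_AA64MMFR1_EL1,
 *		.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
 *		.field_width = 4,
 *		.sign = FTR_UNSIGNED,
 *		.min_field_value = 1,
 *		.cpu_enable = cpu_enable_pan,
 *	}
 */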

static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

/*
 * Generic helper for handling capabilities with multiple (match,enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}

extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS)

bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
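
/*
 * Usage sketch (illustrative): since cpu_feature(x) expands to
 * KERNEL_HWCAP_##x, a caller can test a hwcap by its short name, e.g.
 * cpu_have_named_feature(PMULL) is cpu_have_feature(KERNEL_HWCAP_PMULL).
 */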

static __always_inline bool boot_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
}

static __always_inline bool system_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
}

/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static __always_inline bool cpus_have_cap(unsigned int num)
{
	if (__builtin_constant_p(num) && !cpucap_is_possible(num))
		return false;
	if (num >= ARM64_NCAPS)
		return false;
	return arch_test_bit(num, system_cpucaps);
}

/*
 * Test for a capability without a runtime check.
 *
 * Before boot capabilities are finalized, this will BUG().
 * After boot capabilities are finalized, this is patched to avoid a
 * runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_boot_cap(int num)
{
	if (boot_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();
}

/*
 * Test for a capability without a runtime check.
 *
 * Before system capabilities are finalized, this will BUG().
 * After system capabilities are finalized, this is patched to avoid a
 * runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();
}
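
/*
 * Illustrative usage sketch: callers pick the cheapest test whose
 * constraints they can satisfy, e.g.:
 *
 *	if (cpus_have_cap(ARM64_HAS_PAN))	// safe at any point in boot
 *		...
 *	if (cpus_have_final_cap(ARM64_HAS_PAN))	// only once system caps are
 *		...				// finalised; patched, no check
 */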

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
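
/*
 * Worked example (illustrative): extracting the 4-bit field at bit 20
 * from features = 0x200000 first shifts the field up to bits [63:60]
 * (features << 40) and then back down to bits [3:0], so
 * cpuid_feature_extract_unsigned_field(0x200000, 20) returns 0x2.
 * The signed variant uses an arithmetic right shift instead, so a field
 * value of 0xf decodes as -1 (e.g. "feature not implemented" in fields
 * that follow the signed ID scheme).
 */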

/*
 * Fields that identify the version of the Performance Monitors Extension do
 * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
 * "Alternative ID scheme used for the Performance Monitors Extension version".
 */
static inline u64 __attribute_const__
cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
{
	u64 val = cpuid_feature_extract_unsigned_field(features, field);
	u64 mask = GENMASK_ULL(field + 3, field);

	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	if (WARN_ON_ONCE(!width))
		width = 4;
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_sme(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}

void __init setup_boot_cpu_features(void);
void __init setup_system_features(void);
void __init setup_user_features(void);

void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_EL1_CSV2_SHIFT);
	return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}

const struct cpumask *system_32bit_el0_cpumask(void);

DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}

static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN4_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
}

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN64_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN16_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_BIGEND_SHIFT);

	return val == 0x1;
}

static __always_inline bool system_supports_fpsimd(void)
{
	return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!system_uses_hw_pan();
}

static __always_inline bool system_supports_sve(void)
{
	return alternative_has_cap_unlikely(ARM64_SVE);
}

static __always_inline bool system_supports_sme(void)
{
	return alternative_has_cap_unlikely(ARM64_SME);
}

static __always_inline bool system_supports_sme2(void)
{
	return alternative_has_cap_unlikely(ARM64_SME2);
}

static __always_inline bool system_supports_fa64(void)
{
	return alternative_has_cap_unlikely(ARM64_SME_FA64);
}

static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}

static __always_inline bool system_supports_fpmr(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
}

static __always_inline bool system_supports_cnp(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

static __always_inline bool system_uses_irq_prio_masking(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
	return alternative_has_cap_unlikely(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}

static inline bool system_supports_bti(void)
{
	return cpus_have_final_cap(ARM64_BTI);
}

static inline bool system_supports_bti_kernel(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
	       cpus_have_final_boot_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}

static inline bool system_supports_lpa2(void)
{
	return cpus_have_final_cap(ARM64_HAS_LPA2);
}

int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);

static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
	case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
	case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
	case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
	case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
	case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
	case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by the "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
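
/*
 * Usage sketch (illustrative): the PARange field is usually extracted
 * from ID_AA64MMFR0_EL1 first, e.g.:
 *
 *	parange = cpuid_feature_extract_unsigned_field(mmfr0,
 *					ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 *	pa_bits = id_aa64mmfr0_parange_to_phys_shift(parange);
 */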

/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	/*
	 * Use the cached version to avoid emulated msr operation on
	 * KVM guests.
	 */
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}

static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_PAN_SHIFT);
}

#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif

/* Get a cpu that supports the Activity Monitors Unit (AMU) */
extern int get_cpu_with_amu_feat(void);

static inline unsigned int get_vmid_bits(u64 mmfr1)
{
	int vmid_bits;

	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
	if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
		return 16;

	/*
	 * Return the default here even if any reserved
	 * value is fetched from the system register.
	 */
	return 8;
}

s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);

extern struct arm64_ftr_override id_aa64mmfr0_override;
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64mmfr2_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;

extern struct arm64_ftr_override arm64_sw_feature_override;

static inline
u64 arm64_apply_feature_override(u64 val, int feat, int width,
				 const struct arm64_ftr_override *override)
{
	u64 oval = override->val;

	/*
	 * When it encounters an invalid override (e.g., an override that
	 * cannot be honoured due to a missing CPU feature), the early idreg
	 * override code will set the mask to 0x0 and the value to non-zero for
	 * the field in question. In order to determine whether the override is
	 * valid or not for the field we are interested in, we first need to
	 * disregard bits belonging to other fields.
	 */
	oval &= GENMASK_ULL(feat + width - 1, feat);

	/*
	 * The override is valid if all value bits are accounted for in the
	 * mask. If so, replace the masked bits with the override value.
	 */
	if (oval == (oval & override->mask)) {
		val &= ~override->mask;
		val |= override->val;
	}

	/* Extract the field from the updated value */
	return cpuid_feature_extract_unsigned_field(val, feat);
}
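
/*
 * Worked sketch (hypothetical values): for a 4-bit field at bit 0 with
 * override->val = 0x1 and override->mask = 0xf, the override is valid,
 * the field bits of @val are replaced, and the function returns 0x1
 * regardless of what the CPU's own ID register reports for that field.
 * With mask = 0x0 and val = 0xf (an invalid override), nothing is
 * replaced and the CPU's original field value is returned.
 */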

static inline bool arm64_test_sw_feature_override(int feat)
{
	/*
	 * Software features are pseudo CPU features that have no underlying
	 * CPUID system register value to apply the override to.
	 */
	return arm64_apply_feature_override(0, feat, 4,
					    &arm64_sw_feature_override);
}

static inline bool kaslr_disabled_cmdline(void)
{
	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
}

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

static inline bool cpu_has_bti(void)
{
	if (!IS_ENABLED(CONFIG_ARM64_BTI))
		return false;

	return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
					    ID_AA64PFR1_EL1_BT_SHIFT, 4,
					    &id_aa64pfr1_override);
}

static inline bool cpu_has_pac(void)
{
	u64 isar1, isar2;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		return false;

	isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	isar2 = read_cpuid(ID_AA64ISAR2_EL1);

	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
					 &id_aa64isar1_override))
		return true;

	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
					 &id_aa64isar1_override))
		return true;

	return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
					    &id_aa64isar2_override);
}

static inline bool cpu_has_lva(void)
{
	u64 mmfr2;

	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
	mmfr2 &= ~id_aa64mmfr2_override.mask;
	mmfr2 |= id_aa64mmfr2_override.val;
	return cpuid_feature_extract_unsigned_field(mmfr2,
						    ID_AA64MMFR2_EL1_VARange_SHIFT);
}

static inline bool cpu_has_lpa2(void)
{
#ifdef CONFIG_ARM64_LPA2
	u64 mmfr0;
	int feat;

	mmfr0 = read_sysreg(id_aa64mmfr0_el1);
	mmfr0 &= ~id_aa64mmfr0_override.mask;
	mmfr0 |= id_aa64mmfr0_override.val;
	feat = cpuid_feature_extract_signed_field(mmfr0,
						  ID_AA64MMFR0_EL1_TGRAN_SHIFT);

	return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
#else
	return false;
#endif
}

#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */