/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#define MAX_CPU_FEATURES	64
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
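/*
 * Worked example (illustrative, not from the original header): for a
 * FTR_LOWER_SAFE field where one CPU reports 2 and another reports 1,
 * the sanitised system-wide value is 1; for FTR_HIGHER_SAFE it would
 * be 2. For FTR_EXACT, a mismatch makes the table's safe_val win.
 */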
#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
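/*
 * Usage sketch (illustrative): feature tables can expose a field to user
 * space only when the matching kernel support is compiled in, e.g.
 * FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE) evaluates to FTR_VISIBLE
 * when CONFIG_ARM64_SVE is enabled and to FTR_HIDDEN otherwise.
 */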
struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
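/*
 * Illustrative sketch (hypothetical values, not a mainline table entry): a
 * 4-bit unsigned, lower-is-safe field at bit 20 of some ID register could be
 * described as:
 *
 *	{
 *		.sign		= FTR_UNSIGNED,
 *		.visible	= FTR_VISIBLE,
 *		.strict		= FTR_STRICT,
 *		.type		= FTR_LOWER_SAFE,
 *		.shift		= 20,
 *		.width		= 4,
 *		.safe_val	= 0,
 *	},
 *
 * The real tables in cpufeature.c typically build such entries with a
 * helper initialiser macro rather than spelling the fields out.
 */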
/*
 * Describe the early feature override to the core override code:
 *
 * @val			Values that are to be merged into the final
 *			sanitised value of the register. Only the bitfields
 *			set to 1 in @mask are valid
 * @mask		Mask of the features that are overridden by @val
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
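/*
 * Example (illustrative, hypothetical values): to override a 4-bit field
 * at bit 8 with the value 1, early boot code could set:
 *
 *	override->mask |= GENMASK_ULL(11, 8);
 *	override->val  |= 1ULL << 8;
 *
 * Fields whose @mask bits remain 0 keep their sanitised value.
 */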
/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in cpu_hwcaps)
 * and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection: The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback (@matches()) to
 *    perform the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *    a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *       matches. This implies we have to run the check on all the
 *       booting CPUs, until the system decides that the state of the
 *       capability is finalised. (See section 2 below)
 *
 *    b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *       match. This implies we run the check only once, when the
 *       system decides to finalise the state of the capability. If the
 *       capability relies on a field in one of the CPU ID feature
 *       registers, we use the sanitised value of the register from the
 *       CPU feature infrastructure to make the decision.
 *
 *    c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
 *       feature. This category is for features that are "finalised"
 *       (or used) by the kernel very early, even before the SMP CPUs
 *       are brought up.
 *
 *    The process of detection is usually denoted by "update" capability
 *    state in the code.
 *
 * 2) Finalise the state: The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make
 *    better decisions based on the available set of CPUs. However, there
 *    are some special cases, where the action is taken during the early
 *    boot by the primary boot CPU (e.g., running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the capability
 *    and takes any action, as it may be impossible to execute the actions
 *    safely. A CPU brought up after a capability is "finalised" is
 *    referred to as a "late CPU" w.r.t the capability. e.g., all secondary
 *    CPUs are treated as "late CPUs" for capabilities determined by the
 *    boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *      a) Boot CPU scope capabilities - Finalised by the primary boot CPU via
 *         setup_boot_cpu_capabilities().
 *      b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g., by the user or by the
 *    kernel), the kernel should make sure that it is safe to use the CPU,
 *    by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *	secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots:
 *
 *    a) SCOPE_BOOT_CPU: All CPUs are verified against the capability
 *       except for the primary boot CPU.
 *
 *    b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *       user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action based on the
 *    severity (e.g., a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability, if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a later
 *    point. The actions are always initiated only after the capability
 *    is finalised. This is usually denoted by "enabling" the capability.
 *    The actions are initiated as follows:
 *
 *	a) Action is triggered on all online CPUs, after the capability is
 *	   finalised, invoked within the stop_machine() context from
 *	   enable_cpu_capabilities().
 *
 *	b) For any late CPU brought up after (a), the action is triggered via:
 *
 *	   check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *		x-----------------------------x
 *		| Type  | System   | Late CPU |
 *		|-----------------------------|
 *		|  a    |   y      |    n     |
 *		|-----------------------------|
 *		|  b    |   n      |    y     |
 *		x-----------------------------x
 *
 *    Two separate flag bits are defined to indicate whether each kind of
 *    conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *    Case (a) is not permitted for a capability that the system requires
 *    all CPUs to have in order for the capability to be enabled. This is
 *    typical for capabilities that represent enhanced functionality.
 *
 *    Case (b) is not permitted for a capability that must be enabled
 *    during boot if any CPU in the system requires it in order to run
 *    safely. This is typical for erratum workarounds that cannot be
 *    enabled after the corresponding capability is finalised.
 *
 *    In some non-typical cases, either both (a) and (b), or neither,
 *    should be permitted. This can be described by including neither
 *    or both flags in the capability's type field.
 *
 *    In case of a conflict, the CPU is prevented from booting. If the
 *    ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
 *    then a kernel panic is triggered.
 */
/*
 * Decide how the capability is detected.
 * On any local CPU vs System wide vs the primary boot CPU
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot, i.e., the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM	ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU	ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU	ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL	ARM64_CPUCAP_SCOPE_MASK
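/*
 * Illustrative matches() sketch (hypothetical helper, not part of this
 * header): a SCOPE_SYSTEM check reads the sanitised register view, while a
 * local-CPU check reads the current CPU's copy:
 *
 *	static bool has_example_feature(const struct arm64_cpu_capabilities *cap,
 *					int scope)
 *	{
 *		u64 reg = (scope == SCOPE_SYSTEM) ?
 *			read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1) :
 *			__read_sysreg_by_encoding(SYS_ID_AA64PFR0_EL1);
 *
 *		return cpuid_feature_extract_unsigned_field(reg,
 *					ID_AA64PFR0_SVE_SHIFT) > 0;
 *	}
 */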
/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))
/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system require it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU. In
 * case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel,
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g., working around CPU
	 * errata), where all the CPUs must take some action (e.g.,
	 * changing system control/configuration). Thus, if an action
	 * is required only if the CPU has the capability, then the
	 * routine must check it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};

	/*
	 * An optional list of "matches/cpu_enable" pairs for the same
	 * "capability" of the same "type" as described by the parent.
	 * Only matches(), cpu_enable() and fields relevant to these
	 * methods are significant in the list. cpu_enable() is
	 * invoked only if the corresponding entry matches().
	 * However, if a cpu_enable() method is associated
	 * with multiple matches(), care should be taken that either
	 * the match criteria are mutually exclusive, or that the
	 * method is robust against being called multiple times.
	 */
	const struct arm64_cpu_capabilities *match_list;
};
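/*
 * Illustrative sketch (hypothetical values, not a mainline entry): a
 * feature-register based capability, assuming has_cpuid_feature() as the
 * generic matcher used by cpufeature.c:
 *
 *	static const struct arm64_cpu_capabilities example_cap = {
 *		.desc			= "Example feature",
 *		.capability		= ARM64_HAS_EXAMPLE,	// hypothetical
 *		.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches		= has_cpuid_feature,
 *		.sys_reg		= SYS_ID_AA64PFR0_EL1,
 *		.field_pos		= ID_AA64PFR0_SVE_SHIFT,
 *		.sign			= FTR_UNSIGNED,
 *		.min_field_value	= 1,
 *	};
 */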
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
/*
 * Generic helper for handling capabilities with multiple (match,enable) pairs
 * of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}
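/*
 * Usage sketch (hypothetical entries): a parent capability can delegate to
 * per-CPU-model entries via match_list:
 *
 *	static const struct arm64_cpu_capabilities example_list[] = {
 *		{ .matches = match_cpu_a, .cpu_enable = enable_fix_a },
 *		{ .matches = match_cpu_b, .cpu_enable = enable_fix_b },
 *		{},	// sentinel: ->matches == NULL terminates the walk
 *	};
 *
 * with the parent setting .matches = cpucap_multi_entry_cap_matches and
 * .match_list = example_list.
 */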
static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

/* ARM64 CAPS + alternative_cb */
#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
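/*
 * Expansion example: cpu_have_named_feature(FP) is, by macro expansion,
 * exactly cpu_have_feature(KERNEL_HWCAP_FP); the named variants only save
 * spelling out the KERNEL_HWCAP_ prefix.
 */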
static __always_inline bool system_capabilities_finalized(void)
{
	return static_branch_likely(&arm64_const_caps_ready);
}

/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}
/*
 * Test for a capability without a runtime check.
 *
 * Before capabilities are finalized, this returns false.
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

/*
 * Test for a capability without a runtime check.
 *
 * Before capabilities are finalized, this will BUG().
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		BUG();
}
/*
 * Test for a capability, possibly with a runtime check for non-hyp code.
 *
 * For hyp code, this behaves the same as cpus_have_final_cap().
 *
 * For non-hyp code:
 * Before capabilities are finalized, this behaves as cpus_have_cap().
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_const_cap(int num)
{
	if (is_hyp_code())
		return cpus_have_final_cap(num);
	else if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}
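/*
 * Informal selection guide (not from the original header): cpus_have_cap()
 * is always safe but always tests the bitmap; cpus_have_const_cap() falls
 * back to the bitmap until capabilities are finalized, then becomes a
 * static branch; cpus_have_final_cap() is for paths that can only run
 * after finalisation, e.g.:
 *
 *	if (cpus_have_final_cap(ARM64_HAS_PAN))
 *		...
 */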
static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
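/*
 * Worked example (illustrative): with features = 0x120 and field = 4, the
 * unsigned extraction computes ((u64)0x120 << 56) >> 60 == 0x2, i.e. bits
 * [7:4] of the register. Signed extraction of the same field would
 * sign-extend from bit 7 instead.
 */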
/*
 * Fields that identify the version of the Performance Monitors Extension do
 * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
 * "Alternative ID scheme used for the Performance Monitors Extension version".
 */
static inline u64 __attribute_const__
cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
{
	u64 val = cpuid_feature_extract_unsigned_field(features, field);
	u64 mask = GENMASK_ULL(field + 3, field);

	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
	if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}
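/*
 * Usage sketch (mirrors how KVM caps the guest-visible PMU version; exact
 * call sites and constants may differ by kernel version):
 *
 *	val = cpuid_feature_cap_perfmon_field(val, ID_AA64DFR0_PMUVER_SHIFT,
 *					      ID_AA64DFR0_PMUVER_8_4);
 *
 * This caps the PMUVer field at the ARMv8.4 PMU encoding and squashes the
 * IMPLEMENTATION DEFINED encoding (0xf) to zero.
 */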
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_32BIT_64BIT;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);

	return val >= ID_AA64PFR1_MTE;
}
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

const struct cpumask *system_32bit_el0_cpumask(void);

DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}
static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN4_SHIFT);

	return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
}

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN64_SHIFT);

	return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_TGRAN16_SHIFT);

	return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
}
static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_BIGENDEL_SHIFT);

	return val == 0x1;
}
static __always_inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_PAN) &&
		cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!system_uses_hw_pan();
}

static __always_inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}

static __always_inline bool system_supports_cnp(void)
{
	return IS_ENABLED(CONFIG_ARM64_CNP) &&
		cpus_have_const_cap(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

static __always_inline bool system_uses_irq_prio_masking(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
		cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
	return IS_ENABLED(CONFIG_ARM64_MTE) &&
		cpus_have_const_cap(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}

static inline bool system_supports_bti(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_PARANGE_32: return 32;
	case ID_AA64MMFR0_PARANGE_36: return 36;
	case ID_AA64MMFR0_PARANGE_40: return 40;
	case ID_AA64MMFR0_PARANGE_42: return 42;
	case ID_AA64MMFR0_PARANGE_44: return 44;
	case ID_AA64MMFR0_PARANGE_48: return 48;
	case ID_AA64MMFR0_PARANGE_52: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by the "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
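/*
 * Usage sketch (illustrative): converting the sanitised PARange field into
 * a physical address width, assuming ID_AA64MMFR0_PARANGE_SHIFT as the
 * field position:
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	int parange = cpuid_feature_extract_unsigned_field(mmfr0,
 *					ID_AA64MMFR0_PARANGE_SHIFT);
 *	u32 pa_bits = id_aa64mmfr0_parange_to_phys_shift(parange);
 */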
/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_HADBS_SHIFT);
}
static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_PAN_SHIFT);
}
#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif

/* Get a cpu that supports the Activity Monitors Unit (AMU) */
extern int get_cpu_with_amu_feat(void);
static inline unsigned int get_vmid_bits(u64 mmfr1)
{
	int vmid_bits;

	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_VMIDBITS_SHIFT);
	if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
		return 16;

	/*
	 * Return the default here even if any reserved
	 * value is fetched from the system register.
	 */
	return 8;
}
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64isar1_override;

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */