/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */

#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
#define cpu_feature(x)		ilog2(HWCAP_ ## x)
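/*
 * Illustrative note (added for clarity, not part of the original header):
 * because each HWCAP_x constant is a single-bit mask, cpu_feature() converts
 * it into a bit index. For example, if HWCAP_FP were bit 0 (0x1), then
 * cpu_feature(FP) == ilog2(0x1) == 0, which is the index that
 * cpu_have_feature() below tests against elf_hwcap.
 */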
#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which, the safe value specified
 * in the table is chosen.
 */
enum ftr_type {
	FTR_EXACT,	/* Use a predefined safe value */
	FTR_LOWER_SAFE,	/* Smaller value is safe */
	FTR_HIGHER_SAFE,/* Bigger value is safe */
};
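/*
 * Worked example (added for clarity, not part of the original header): for a
 * LOWER_SAFE field, if one CPU reports the value 2 and another reports 1,
 * the sanitised system-wide value returned by read_sanitised_ftr_reg() is 1,
 * i.e. only the capability level common to every CPU is advertised.
 */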
#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */
struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
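/*
 * Illustrative sketch (hypothetical values, not from the original header):
 * a 4-bit, unsigned, lower-is-safe, user-visible field at bit 20 of an ID
 * register could be described as:
 *
 *	{
 *		.sign		= FTR_UNSIGNED,
 *		.visible	= FTR_VISIBLE,
 *		.strict		= FTR_STRICT,
 *		.type		= FTR_LOWER_SAFE,
 *		.shift		= 20,
 *		.width		= 4,
 *		.safe_val	= 0,
 *	}
 *
 * The real per-register tables of such entries live in cpufeature.c.
 */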
/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	const struct arm64_ftr_bits	*ftr_bits;
};
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/* scope of capability check */
enum {
	SCOPE_SYSTEM,
	SCOPE_LOCAL_CPU,
};
struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	int def_scope;			/* default scope */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	int (*enable)(void *);		/* Called on all active CPUs */
	union {
		struct {	/* To be used for erratum handling only */
			u32 midr_model;
			u32 midr_range_min, midr_range_max;
		};

		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};
};
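/*
 * Illustrative sketch (not from the original header; ARM64_WORKAROUND_EXAMPLE
 * is a hypothetical capability used only for this example): an erratum
 * workaround limited to a single CPU revision range might be described
 * roughly as:
 *
 *	{
 *		.desc = "Example erratum workaround",
 *		.capability = ARM64_WORKAROUND_EXAMPLE,
 *		.def_scope = SCOPE_LOCAL_CPU,
 *		.midr_model = MIDR_CORTEX_A57,
 *		.midr_range_min = 0x00,
 *		.midr_range_max = 0x12,
 *	}
 *
 * with matches() typically supplied by the MIDR-range helpers used by the
 * erratum tables in cpu_errata.c.
 */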
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);
static inline bool cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}
/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

static inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return test_bit(num, cpu_hwcaps);
}
static inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}
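/*
 * Usage note (added for clarity, not part of the original header): callers
 * normally test capabilities through cpus_have_const_cap(), e.g.
 *
 *	if (cpus_have_const_cap(ARM64_HAS_PAN))
 *		...
 *
 * Until arm64_const_caps_ready is set the check falls back to the cpu_hwcaps
 * bitmap; afterwards it is a patched static branch with no bitmap lookup.
 */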
static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
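/*
 * Worked example (added for clarity, not part of the original header): with
 * features == 0x123, cpuid_feature_extract_unsigned_field(features, 4)
 * shifts left by 56 and back right by 60, yielding 0x2 - the 4-bit field at
 * bits [7:4].
 */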
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}
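/*
 * Note (added for clarity, not part of the original header): user_mask
 * selects the fields exposed to user space straight from the sanitised
 * system value; everything outside the mask comes from the precomputed
 * user_val instead.
 */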
static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

	return val > 0;
}
void __init setup_cpu_features(void);

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info);
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void);

void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);

u64 read_sanitised_ftr_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool system_supports_32bit_el0(void)
{
	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}
static inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}
/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
static inline u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}
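/*
 * Note (added for clarity, not part of the original header): ZCR_ELx.LEN
 * encodes the vector length in 128-bit quadwords minus one, which is why the
 * maximum effective length is written back above as (vq_max - 1).
 */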
#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */