Merge branch 'for-next/stage1-lpa2' into for-next/core
[linux-2.6-microblaze.git] / arch/arm64/include/asm/cpufeature.h
index 34fcdbc..66ba080 100644
@@ -17,6 +17,8 @@
 
 #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR      0
 #define ARM64_SW_FEATURE_OVERRIDE_HVHE         4
+#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF   8
+#define ARM64_SW_FEATURE_OVERRIDE_NOWXN        12
 
 #ifndef __ASSEMBLY__
 
@@ -910,7 +912,9 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
 struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
 
+extern struct arm64_ftr_override id_aa64mmfr0_override;
 extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64mmfr2_override;
 extern struct arm64_ftr_override id_aa64pfr0_override;
 extern struct arm64_ftr_override id_aa64pfr1_override;
 extern struct arm64_ftr_override id_aa64zfr0_override;
@@ -920,9 +924,121 @@ extern struct arm64_ftr_override id_aa64isar2_override;
 
 extern struct arm64_ftr_override arm64_sw_feature_override;
 
+static inline
+u64 arm64_apply_feature_override(u64 val, int feat, int width,
+                                const struct arm64_ftr_override *override)
+{
+       u64 oval = override->val;
+
+       /*
+        * When it encounters an invalid override (e.g., an override that
+        * cannot be honoured due to a missing CPU feature), the early idreg
+        * override code will set the mask to 0x0 and the value to non-zero for
+        * the field in question. In order to determine whether the override is
+        * valid or not for the field we are interested in, we first need to
+        * disregard bits belonging to other fields.
+        */
+       oval &= GENMASK_ULL(feat + width - 1, feat);
+
+       /*
+        * The override is valid if all value bits are accounted for in the
+        * mask. If so, replace the masked bits with the override value.
+        */
+       if (oval == (oval & override->mask)) {
+               val &= ~override->mask;
+               val |= oval;
+       }
+
+       /* Extract the field from the updated value */
+       return cpuid_feature_extract_unsigned_field(val, feat);
+}
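
As a self-contained illustration of the rule described in the comment above (not part of the patch itself): a valid override replaces the masked bits, while an "invalid" one, i.e. value bits set but mask cleared by the early idreg code, is left unapplied. GENMASK_ULL(), struct ftr_override and the field extraction below are local stand-ins, reproduced only so the sketch builds as plain userspace C.

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's GENMASK_ULL() */
#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

struct ftr_override {                   /* mirrors struct arm64_ftr_override */
        uint64_t        val;
        uint64_t        mask;
};

static uint64_t apply_override(uint64_t val, int feat, int width,
                               const struct ftr_override *o)
{
        /* drop value bits that belong to other fields */
        uint64_t oval = o->val & GENMASK_ULL(feat + width - 1, feat);

        /* honour the override only if every value bit is covered by the mask */
        if (oval == (oval & o->mask)) {
                val &= ~o->mask;
                val |= oval;
        }

        /* extract the (possibly overridden) field */
        return (val >> feat) & GENMASK_ULL(width - 1, 0);
}

int main(void)
{
        uint64_t reg = 0x20;    /* hardware reports 2 in the 4-bit field at bit 4 */
        struct ftr_override ok  = { .val = 0x00, .mask = 0xf0 };  /* force field to 0 */
        struct ftr_override bad = { .val = 0x10, .mask = 0x00 };  /* invalid: mask cleared */

        printf("%llu\n", (unsigned long long)apply_override(reg, 4, 4, &ok));   /* 0 */
        printf("%llu\n", (unsigned long long)apply_override(reg, 4, 4, &bad));  /* 2 */
        return 0;
}
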
+
+static inline bool arm64_test_sw_feature_override(int feat)
+{
+       /*
+        * Software features are pseudo CPU features that have no underlying
+        * CPUID system register value to apply the override to.
+        */
+       return arm64_apply_feature_override(0, feat, 4,
+                                           &arm64_sw_feature_override);
+}
+
+static inline bool kaslr_disabled_cmdline(void)
+{
+       return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
+}
+
+static inline bool arm64_wxn_enabled(void)
+{
+       if (!IS_ENABLED(CONFIG_ARM64_WXN))
+               return false;
+       return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN);
+}
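
For context, not part of the patch: the software-feature override is a plain u64 carved into 4-bit slots, one per ARM64_SW_FEATURE_OVERRIDE_* constant (0, 4, 8, 12), which the early command-line parsing fills in. The hypothetical sketch below mimics the slot test in isolation, leaving aside the invalid-override handling shown further up; SLOT_* and test_sw_slot() are made-up local names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_NOKASLR    0       /* mirrors ARM64_SW_FEATURE_OVERRIDE_NOKASLR */
#define SLOT_NOWXN      12      /* mirrors ARM64_SW_FEATURE_OVERRIDE_NOWXN */

struct sw_override {            /* stand-in for struct arm64_ftr_override */
        uint64_t        val;
        uint64_t        mask;
};

/* report whether the 4-bit slot at 'shift' holds a non-zero value */
static bool test_sw_slot(const struct sw_override *o, int shift)
{
        return (((o->val & o->mask) >> shift) & 0xf) != 0;
}

int main(void)
{
        /* as if only a "nokaslr"-style request had been parsed */
        struct sw_override o = { .val  = 0x1ULL << SLOT_NOKASLR,
                                 .mask = 0xfULL << SLOT_NOKASLR };

        printf("%d %d\n", test_sw_slot(&o, SLOT_NOKASLR),       /* 1 */
                          test_sw_slot(&o, SLOT_NOWXN));        /* 0 */
        return 0;
}
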
+
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);
 
+static inline bool cpu_has_bti(void)
+{
+       if (!IS_ENABLED(CONFIG_ARM64_BTI))
+               return false;
+
+       return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
+                                           ID_AA64PFR1_EL1_BT_SHIFT, 4,
+                                           &id_aa64pfr1_override);
+}
+
+static inline bool cpu_has_pac(void)
+{
+       u64 isar1, isar2;
+
+       if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+               return false;
+
+       isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+       isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+
+       if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
+                                        &id_aa64isar1_override))
+               return true;
+
+       if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
+                                        &id_aa64isar1_override))
+               return true;
+
+       return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
+                                           &id_aa64isar2_override);
+}
+
+static inline bool cpu_has_lva(void)
+{
+       u64 mmfr2;
+
+       mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+       mmfr2 &= ~id_aa64mmfr2_override.mask;
+       mmfr2 |= id_aa64mmfr2_override.val;
+       return cpuid_feature_extract_unsigned_field(mmfr2,
+                                                   ID_AA64MMFR2_EL1_VARange_SHIFT);
+}
+
+static inline bool cpu_has_lpa2(void)
+{
+#ifdef CONFIG_ARM64_LPA2
+       u64 mmfr0;
+       int feat;
+
+       mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+       mmfr0 &= ~id_aa64mmfr0_override.mask;
+       mmfr0 |= id_aa64mmfr0_override.val;
+       feat = cpuid_feature_extract_signed_field(mmfr0,
+                                                 ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+
+       return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
+#else
+       return false;
+#endif
+}
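
A worked example of why cpu_has_lpa2() uses the signed extraction (again an illustration, not part of the patch): ID_AA64MMFR0_EL1.TGRANx is a signed 4-bit field, so a "not implemented" encoding such as 0xf must sign-extend to a negative value before the feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2 comparison; the per-granule TGRAN_LPA2 alias also hides the fact that the LPA2-capable encoding differs between 4K and 16K granules. extract_signed_field4() below is a hypothetical local re-creation that, like the kernel helper, relies on arithmetic right shift.

#include <assert.h>
#include <stdint.h>

/* local re-creation of a signed 4-bit ID-register field extraction */
static int extract_signed_field4(uint64_t reg, int shift)
{
        /* move the field's top bit to bit 63, then arithmetic-shift back down */
        return (int)((int64_t)(reg << (64 - shift - 4)) >> 60);
}

int main(void)
{
        /* with a 4K granule, TGRAN4 sits at bits [31:28] */
        assert(extract_signed_field4(0x1ULL << 28, 28) ==  1);  /* LPA2 capable */
        assert(extract_signed_field4(0xfULL << 28, 28) == -1);  /* not supported */
        return 0;
}
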
+
 #endif /* __ASSEMBLY__ */
 
 #endif