kasan, arm64: implement HW_TAGS runtime
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 39138f6..7ffb5f1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/kasan.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
+#include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
 #include <asm/processor.h>
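/*
 * Hedged sketch, not copied from <linux/kasan.h>: the rough shape of the
 * interface the new include pulls in. The assumption is that the per-CPU
 * hook compiles to an empty stub when CONFIG_KASAN_HW_TAGS is not set,
 * which is why cpu_enable_mte() below can call it unconditionally.
 */
#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);	/* per-CPU hardware tag-based KASAN setup */
#else
static inline void kasan_init_hw_tags_cpu(void) { }
#endif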
@@ -1709,9 +1711,26 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
                cleared_zero_page = true;
                mte_clear_page_tags(lm_alias(empty_zero_page));
        }
+
+       kasan_init_hw_tags_cpu();
 }
 #endif /* CONFIG_ARM64_MTE */
 
+#ifdef CONFIG_KVM
+static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+       if (kvm_get_mode() != KVM_MODE_PROTECTED)
+               return false;
+
+       if (is_kernel_in_hyp_mode()) {
+               pr_warn("Protected KVM not available with VHE\n");
+               return false;
+       }
+
+       return true;
+}
+#endif /* CONFIG_KVM */
+
 /* Internal helper functions to match cpu capability type */
 static bool
 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
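/*
 * Hedged sketch only: kvm_get_mode()/KVM_MODE_PROTECTED (provided via the
 * <asm/kvm_host.h> include added above) reflect a boot-time selection.
 * The names example_kvm_mode and example_kvm_mode_setup are hypothetical;
 * the in-tree parsing lives under arch/arm64/kvm/. This merely illustrates
 * the usual early_param() pattern for such a command-line switch.
 */
#include <linux/init.h>
#include <linux/string.h>
#include <asm/kvm_host.h>

static enum kvm_mode example_kvm_mode = KVM_MODE_DEFAULT;	/* hypothetical */

static int __init example_kvm_mode_setup(char *arg)
{
	if (arg && !strcmp(arg, "protected"))
		example_kvm_mode = KVM_MODE_PROTECTED;
	return 0;
}
early_param("kvm-arm.mode", example_kvm_mode_setup);

enum kvm_mode kvm_get_mode(void)	/* sketch of the getter used by the matcher above */
{
	return example_kvm_mode;
}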
@@ -1803,6 +1822,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .field_pos = ID_AA64PFR0_EL1_SHIFT,
                .min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
        },
+       {
+               .desc = "Protected KVM",
+               .capability = ARM64_KVM_PROTECTED_MODE,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = is_kvm_protected_mode,
+       },
 #endif
        {
                .desc = "Kernel page table isolation (KPTI)",
@@ -2831,14 +2856,28 @@ static int __init enable_mrs_emulation(void)
 
 core_initcall(enable_mrs_emulation);
 
+enum mitigation_state arm64_get_meltdown_state(void)
+{
+       if (__meltdown_safe)
+               return SPECTRE_UNAFFECTED;
+
+       if (arm64_kernel_unmapped_at_el0())
+               return SPECTRE_MITIGATED;
+
+       return SPECTRE_VULNERABLE;
+}
+
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
                          char *buf)
 {
-       if (__meltdown_safe)
+       switch (arm64_get_meltdown_state()) {
+       case SPECTRE_UNAFFECTED:
                return sprintf(buf, "Not affected\n");
 
-       if (arm64_kernel_unmapped_at_el0())
+       case SPECTRE_MITIGATED:
                return sprintf(buf, "Mitigation: PTI\n");
 
-       return sprintf(buf, "Vulnerable\n");
+       default:
+               return sprintf(buf, "Vulnerable\n");
+       }
 }
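/*
 * Hedged user-space example (not part of the patch): cpu_show_meltdown()
 * above backs the standard sysfs vulnerabilities file, so the reported
 * state can be read like any regular file.
 */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

	if (!f) {
		perror("meltdown sysfs");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: PTI" */
	fclose(f);
	return 0;
}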