.sign = FTR_UNSIGNED,
},
#endif
+ #ifdef CONFIG_ARM64_MTE
+ {
+ .desc = "Memory Tagging Extension",
+ .capability = ARM64_MTE,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_MTE_SHIFT,
+ .min_field_value = ID_AA64PFR1_MTE,
+ .sign = FTR_UNSIGNED,
+ .cpu_enable = cpu_enable_mte,
+ },
+ #endif /* CONFIG_ARM64_MTE */
+ {
+ .desc = "RCpc load-acquire (LDAPR)",
+ .capability = ARM64_HAS_LDAPR,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_LRCPC_SHIFT,
+ .matches = has_cpuid_feature,
+ .min_field_value = 1,
+ },
{},
};
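(Aside, not part of the diff: a minimal sketch of how other kernel code could query the two capabilities registered above once cpufeature enumeration finalises them. It assumes the usual helpers from <asm/cpufeature.h>, including system_supports_mte(), which the same MTE series adds as a wrapper around cpus_have_const_cap(ARM64_MTE); report_new_caps() itself is purely illustrative.)

#include <linux/printk.h>
#include <asm/cpufeature.h>

static void report_new_caps(void)
{
	/* MTE is a strict boot-CPU feature: every boot-time CPU must expose it */
	if (system_supports_mte())
		pr_info("MTE: allocation tags usable\n");

	/* LDAPR is a system-wide feature; callers may also rely on alternatives */
	if (cpus_have_const_cap(ARM64_HAS_LDAPR))
		pr_info("LDAPR: RCpc load-acquire instructions available\n");
}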
PAGE_KERNEL_RO);
}
+ static bool crash_mem_map __initdata;
+
+ static int __init enable_crash_mem_map(char *arg)
+ {
+ /*
+ * Proper parameter parsing is done by reserve_crashkernel(). We only
+ * need to know if the linear map has to avoid block mappings so that
+ * the crashkernel reservations can be unmapped later.
+ */
+ crash_mem_map = true;
+
+ return 0;
+ }
+ early_param("crashkernel", enable_crash_mem_map);
+
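(For reference, not part of the diff: a rough sketch of the later reserve_crashkernel() step that this early_param hook defers to, using the generic parse_crashkernel() helper and memblock. The name reserve_crashkernel_sketch() is hypothetical, and the real arm64 code additionally chooses a base itself when crashkernel= does not supply one.)

#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/memblock.h>

static void __init reserve_crashkernel_sketch(void)
{
	unsigned long long crash_base = 0, crash_size = 0;

	/* Parse "crashkernel=size[@offset]" from the saved boot command line */
	if (parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			      &crash_size, &crash_base) || !crash_size)
		return;

	/* Carve the region out of memblock and publish it via crashk_res */
	memblock_reserve(crash_base, crash_size);
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}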
static void __init map_mem(pgd_t *pgdp)
{
- phys_addr_t kernel_start = __pa_symbol(_text);
+ phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
- struct memblock_region *reg;
+ phys_addr_t start, end;
int flags = 0;
+ u64 i;
- if (rodata_full || debug_pagealloc_enabled())
+ if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
 * Take care not to create a writable alias for the
 * read-only text and rodata sections of the kernel image.
 * So temporarily mark them as NOMAP to skip mappings in
 * the following for-loop.
 */
memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
- #ifdef CONFIG_KEXEC_CORE
- if (crashk_res.end)
- memblock_mark_nomap(crashk_res.start,
- resource_size(&crashk_res));
- #endif
/* map all the memory banks */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
-
+ for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
- if (memblock_is_nomap(reg))
- continue;
-
- __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
+ /*
+ * The linear map must allow allocation tags reading/writing
+ * if MTE is present. Otherwise, it has the same attributes as
+ * PAGE_KERNEL.
+ */
+ __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
}
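(Also not part of this hunk: PAGE_KERNEL_TAGGED is introduced by the same MTE series. Roughly, it is PAGE_KERNEL with the MAIR index switched to the new Normal-Tagged memory type, along the lines of the <asm/pgtable-prot.h> sketch below; this is an assumption for illustration, not verbatim source.)

/* Normal write-back memory, but using the Allocation-Tag-capable MAIR slot */
#define PROT_NORMAL_TAGGED	(PROT_DEFAULT | PTE_PXN | PTE_UXN | \
				 PTE_DIRTY | PTE_WRITE | \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define PAGE_KERNEL_TAGGED	__pgprot(PROT_NORMAL_TAGGED)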
/*