// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
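
/*
 * Translate a kernel address into the address of its shadow byte:
 * each shadow byte tracks an 8-byte granule of kernel memory.
 */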
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait(); /* does not return */
}
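
/*
 * Hand out 1 MB segments from the top of the window reserved in
 * kasan_early_init(); used to back shadow memory with EDAT segments.
 */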
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return __va(segment_pos);
}
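
/* Bump-allocate pages downwards from the early page allocator cursor. */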
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return __va(pgalloc_pos);
}
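
/*
 * Allocate and pre-initialize a region/segment (CRST) table, which
 * occupies multiple pages (CRST_ALLOC_ORDER).
 */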
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	/* A 2K page table fills half a page; hand out the two halves in turn. */
	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
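
/*
 * POPULATE_MAP backs shadow with freshly allocated pages or segments,
 * POPULATE_ZERO_SHADOW wires shadow to the shared zero page and tables,
 * POPULATE_SHALLOW creates only the top-level shadow page tables.
 */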
enum populate_mode {
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
{
	return __pgprot(pgprot_val(pgprot) & ~bit);
}

static void __init kasan_early_pgtable_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
	pgprot_t pgt_prot = PAGE_KERNEL;
	pgprot_t sgt_prot = SEGMENT_KERNEL;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pmd_t pmd;
	pte_t pte;

	if (!has_nx) {
		pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
		pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
		sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
	}
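
	/*
	 * Walk the shadow range [address, end). At each level, large and
	 * sufficiently aligned POPULATE_ZERO_SHADOW stretches are wired to
	 * the shared read-only zero-shadow tables instead of allocating.
	 */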
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat) {
					/* Back 1 MB of shadow with a single segment. */
					void *page = kasan_early_alloc_segment();

					memset(page, 0, _SEGMENT_SIZE);
					pmd = __pmd(__pa(page));
					pmd = set_pmd_bit(pmd, sgt_prot);
					set_pmd(pm_dir, pmd);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot);
				set_pte(pt_dir, pte);
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte = __pte(__pa(page));
				pte = set_pte_bit(pte, pgt_prot_zero);
				set_pte(pt_dir, pte);
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
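
/*
 * Detect the facilities KASAN cares about: facility 8 (EDAT1, 1 MB
 * segment mappings) and facility 130 (instruction-execution protection,
 * i.e. NX), and enable them in control register 0.
 */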
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}
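
/*
 * Build the early KASAN shadow: initialize the shared zero-shadow
 * tables, reserve memory for shadow that must be mapped for real, and
 * populate the shadow for the identity map, vmalloc/modules and gaps.
 */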
void __init kasan_early_init(void)
{
	pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end = MODULES_VADDR;
	unsigned long shadow_alloc_size;
	unsigned long start, end;
	int i;

	kasan_early_detect_facilities();
	if (!has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));

	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
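
	/*
	 * Reserve 1/8 of usable memory (the shadow scale is 1:8) below the
	 * current early-allocator position for shadow that is mapped for
	 * real; keep the window segment aligned.
	 */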
	shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
	segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
	segment_low = segment_pos - shadow_alloc_size;
	segment_low = round_down(segment_low, _SEGMENT_SIZE);
	pgalloc_pos = segment_low;

	/*
	 * Current memory layout:
	 * +- 0 -------------+        +- shadow start -+
	 * |1:1 ident mapping|       /|1/8 of ident map|
	 * |                 |      / |                |
	 * +-end of ident map+     /  +----------------+
	 * | ... gap ...     |    /   | kasan          |
	 * |                 |   /    | zero page      |
	 * +- vmalloc area  -+  /     | mapping        |
	 * | vmalloc_size    | /      | (untracked)    |
	 * +- modules vaddr -+/       +----------------+
	 * | 2Gb             |/       |    unmapped    | allocated per module
	 * +- shadow start  -+        +----------------+
	 * | 1/8 addr space  |        | zero pg mapping| (untracked)
	 * +- shadow end ----+--------+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+        +- shadow start -+
	 * |1:1 ident mapping|       /|1/8 of ident map|
	 * |                 |      / |                |
	 * +-end of ident map+     /  +----------------+
	 * | ... gap ...     |    /   | kasan zero page| (untracked)
	 * |                 |   /    |                |
	 * +- vmalloc area  -+  /     +----------------+
	 * | vmalloc_size    | /      |shallow populate|
	 * +- modules vaddr -+/       +----------------+
	 * | 2Gb             |        |shallow populate|
	 * +- shadow start  -+        +----------------+
	 * | 1/8 addr space  |        | zero pg mapping| (untracked)
	 * +- shadow end ----+--------+- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	for_each_mem_detect_usable_block(i, &start, &end)
		kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
					     POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
				     POPULATE_ZERO_SHADOW);
	kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
				     POPULATE_ZERO_SHADOW);

	/* enable kasan */
	init_task.kasan_depth = 0;
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}