// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
unsigned long __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
/* number of 4KB/1MB/2GB pages in the direct mapping, for /proc/meminfo */
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif
#define init_mm		(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir	vmlinux.swapper_pg_dir_off
#define invalid_pg_dir	vmlinux.invalid_pg_dir_off

enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
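
/*
 * pgtable_populate() (defined below) walks the boot page tables from pgd
 * down to pte level for a virtual address range, allocating missing
 * intermediate tables along the way and mapping pages according to mode.
 */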
#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)(x)))

static pte_t pte_z;
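
/*
 * KASAN maps each 8 bytes of memory to one shadow byte:
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * so the shadow of a range is 1/8 of its size. kasan_populate() rounds
 * the shadow range outward to whole pages before populating it.
 */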
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}
static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * |1:1 ident mapping|    /|1/8 of ident map|
	 * |                 |   / |                |
	 * +-end of ident map+  /  +----------------+
	 * | ... gap ...     | /   | kasan          |
	 * |                 |/    | zero page      |
	 * +- vmalloc area  -+     | mapping        |
	 * | vmalloc_size    |     | (untracked)    |
	 * +- modules vaddr -+     +----------------+
	 * | 2Gb             |     |    unmapped    | allocated per module
	 * +- shadow start  -+     +----------------+
	 * | 1/8 addr space  |     | zero pg mapping| (untracked)
	 * +- shadow end ----+-----+- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * |1:1 ident mapping|    /|1/8 of ident map|
	 * |                 |   / |                |
	 * +-end of ident map+  /  +----------------+
	 * | ... gap ...     | /   | kasan zero page| (untracked)
	 * |                 |/    | mapping        |
	 * +- vmalloc area  -+     +----------------+
	 * | vmalloc_size    |     |shallow populate|
	 * +- modules vaddr -+     +----------------+
	 * | 2Gb             |     |shallow populate|
	 * +- shadow start  -+     +----------------+
	 * | 1/8 addr space  |     | zero pg mapping| (untracked)
	 * +- shadow end ----+-----+- shadow end ---+
	 */
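	/* back the shadow for usable physical memory with real, zeroed pages */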
	for_each_physmem_usable_range(i, &start, &end)
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}
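
/*
 * The kasan_*_populate_zero_shadow() helpers below map suitably aligned
 * chunks of shadow memory with the shared early shadow tables and zero
 * page from vmlinux, so that large untracked ranges cost no extra memory.
 */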
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}
static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}
static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}
static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}
static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}
#else

static inline void kasan_populate_shadow(void) {}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif
/*
 * Mimic virt_to_kpte(), since the init_mm symbol is not available here.
 * Unlike virt_to_kpte(), skip the pmd_none() check: the only caller looks
 * up an address that has just been mapped.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}
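
/*
 * Allocate and initialize a region or segment table (crst). Such tables
 * occupy PAGE_SIZE << CRST_ALLOC_ORDER, i.e. four pages or 2048 eight-byte
 * entries, carved out of usable physical memory from the top down.
 */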
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	return table;
}
static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * A 4KB page fits two 2KB page tables. Hand out the second half
	 * first and remember the first half for the next call: handling
	 * pte_leftovers this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
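
/*
 * Return the physical address to be mapped at @addr for the given mode:
 * the identity for POPULATE_DIRECT, the absolute lowcore alias for
 * POPULATE_ABS_LOWCORE, and freshly allocated, zeroed memory for the
 * KASAN shadow. POPULATE_NONE yields -1, i.e. an invalid entry that the
 * caller is expected to replace later.
 */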
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}
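
/*
 * Large mappings require the corresponding EDAT facility: EDAT1 provides
 * 1MB segment (pmd) pages, EDAT2 provides 2GB region-third (pud) pages.
 */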
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat2 &&
	       IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	return machine.has_edat1 &&
	       IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE;
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}
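
/*
 * At the pmd and pud levels the order of attempts is the same: the shared
 * KASAN zero shadow first, then a large page if alignment, remaining size
 * and EDAT allow it, and only then a table for the next lower level.
 */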
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}
static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		/* shallow mode stops here: lower levels are filled on demand */
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}
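
/*
 * Build the boot page tables: identity-map all usable physical memory
 * (with the lowcore mapped by 4KB pages), map the absolute lowcore and
 * memcpy_real areas, populate the KASAN shadow and activate the new ASCEs.
 */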
void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0 first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	kasan_populate_shadow();

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

	/* CR1, CR7 and CR13 hold the primary, secondary and home space ASCEs */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}