1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_PGTABLE_32_DEFS_H
3 #define _ASM_X86_PGTABLE_32_DEFS_H
6 * The Linux x86 paging architecture is 'compile-time dual-mode', it
7 * implements both the traditional 2-level x86 page tables and the
8 * newer 3-level PAE-mode page tables.
11 # include <asm/pgtable-3level_types.h>
12 # define PMD_SIZE (1UL << PMD_SHIFT)
13 # define PMD_MASK (~(PMD_SIZE - 1))
15 # include <asm/pgtable-2level_types.h>
/* Span of one top-level (PGD) entry, and the mask that rounds an
 * address down to a PGDIR_SIZE boundary. PGDIR_SHIFT comes from the
 * 2-/3-level type header selected above. */
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8 * 1024 * 1024)

/* C-only declaration: keep it away from assembly files that include
 * this header for the layout constants. */
#ifndef __ASSEMBLY__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif

/* vmalloc space begins VMALLOC_OFFSET above the end of lowmem. */
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
/* Number of persistent-kmap PTE slots: 512 under PAE, 1024 otherwise
 * (presumably one page-table page's worth of 8- vs 4-byte entries —
 * verify against the pkmap setup code). */
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
/*
 * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
 * to avoid include recursion hell
 */
#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)

/* Place the CPU entry area just below the fixmap, with one guard page,
 * rounded down to a pmd boundary. */
#define CPU_ENTRY_AREA_BASE \
((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)

/* Persistent-kmap window sits one guard page below the CPU entry area,
 * again pmd-aligned. */
#define PKMAP_BASE \
((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
/* vmalloc ends two guard pages below whatever region bounds it from
 * above: the pkmap window when HIGHMEM is configured, otherwise the
 * CPU entry area. */
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
#endif
/* On 32-bit x86, modules load anywhere in the vmalloc area.
 * NOTE(review): MODULES_LEN as written (VADDR - END) is the negated
 * span since MODULES_VADDR < MODULES_END — matches the historical
 * definition here; confirm against users before "fixing". */
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
/* Largest directly-mappable physical memory: the lowmem span from
 * PAGE_OFFSET up to VMALLOC_END, minus the reserved vmalloc gap. */
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
65 #endif /* _ASM_X86_PGTABLE_32_DEFS_H */