// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */
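
/*
 * Fill the PMD slots covering [addr, end): each entry maps the physical
 * address addr - info->offset with info->page_flag (a 2M large-page
 * mapping for the flags current callers pass in), skipping slots that
 * are already populated.
 */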
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}
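
/*
 * Populate the PUD range [addr, end): use a single 1G entry where
 * gbpages are allowed and the slot is fully covered, otherwise fall
 * back to (possibly pre-existing) PMD tables filled with 2M entries.
 * Returns 0 on success or -ENOMEM if a page table allocation fails.
 */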
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* if this is already a gbpage, this portion is already mapped */
		if (pud_leaf(*pud))
			continue;
		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the beginning: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);
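
		/*
		 * Example: with addr == 0x40000000 and next == 0x80000000
		 * both alignment checks above pass and a single 1G entry
		 * covers the slot; if the request ends at 0x60000000
		 * instead, next keeps bits below PUD_MASK set and we fall
		 * through to 2M mappings rather than mapping past the end.
		 */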

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}
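
/*
 * Populate the P4D range [addr, end), reusing a PUD table when the
 * entry is already present and allocating one through
 * info->alloc_pgt_page() otherwise. Returns 0 or -ENOMEM.
 */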
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
	}

	return 0;
}
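
/*
 * Build page table entries that map virtual [pstart + info->offset,
 * pend + info->offset) to physical [pstart, pend) under pgd_page,
 * allocating intermediate tables via info->alloc_pgt_page().
 *
 * A minimal caller-side sketch; alloc_identity_page() and pgt_data are
 * illustrative stand-ins rather than names from any in-tree user:
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= alloc_identity_page,
 *		.context	= &pgt_data,
 *		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *
 *	err = kernel_ident_mapping_init(&info, pgd, mstart, mend);
 *	if (err)
 *		return err;	(-ENOMEM if the allocator runs dry)
 */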
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
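	/*
	 * info->offset shifts the virtual side of the mapping: zero gives
	 * a pure 1:1 (identity) map, while a caller such as the
	 * hibernation code passes __PAGE_OFFSET to build the map in the
	 * direct-mapping range.
	 */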
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;
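	/*
	 * (__default_kernel_pte_mask drops bits the CPU cannot use, e.g.
	 * _PAGE_NX when NX is unsupported, so a caller-supplied
	 * kernpg_flag never sets reserved bits in the table entries.)
	 */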

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}