// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016 Yinghai Lu
 * Copyright (C)      2016 Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "error.h"
#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <linux/pgtable.h>
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"
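
/*
 * Note: pulling in ident_map.c provides kernel_ident_mapping_init(), which
 * walks the PGD/P4D/PUD levels and installs 2M (PMD-level) mappings for the
 * requested range, allocating intermediate page tables through the
 * alloc_pgt_page() callback defined below.
 */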

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}
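
/*
 * Note: this is a simple bump allocator over the scratch area that pgt_buf
 * points into (the '_pgtable' region set up by the boot code, see
 * initialize_identity_maps() below); allocated pages are never freed.
 */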

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;
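
/*
 * The decompressor carries its own copy of 'physical_mask' so that
 * initialize_identity_maps() can strip the SME encryption bit from
 * __PHYSICAL_MASK without depending on the kernel-proper definition.
 */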
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time not build time.
 */
static struct x86_mapping_info mapping_info;

/*
 * Adds the specified range to the identity mappings.
 */
static void add_identity_map(unsigned long start, unsigned long end)
{
	int ret;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end);
	if (ret)
		error("Error: kernel_ident_mapping_init() failed\n");
}
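
/*
 * add_identity_map() is used both for the initial kernel-image mapping in
 * initialize_identity_maps() and by the boot #PF handler below, which maps
 * the 2M region around a faulting address on demand.
 */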

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void)
{
	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, we use '_pgtable' to allocate the p4d page
	 * table, the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}

	/*
	 * New page-table is set up - map the kernel image and load it
	 * into cr3.
	 */
	add_identity_map((unsigned long)_head, (unsigned long)_end);
	write_cr3(top_level_pgt);
}

/*
 * This switches the page tables to the new level4 that has been built
 * via calls to add_identity_map() above. If booted via startup_32(),
 * this is effectively a no-op.
 */
void finalize_identity_maps(void)
{
	write_cr3(top_level_pgt);
}

static pte_t *split_large_pmd(struct x86_mapping_info *info,
			      pmd_t *pmdp, unsigned long __address)
{
	unsigned long page_flags;
	unsigned long address;
	pte_t *pte;
	pmd_t pmd;
	int i;

	pte = (pte_t *)info->alloc_pgt_page(info->context);
	if (!pte)
		return NULL;

	address = __address & PMD_MASK;
	/* No large page - clear PSE flag */
	page_flags = info->page_flag & ~_PAGE_PSE;

	/* Populate the PTEs */
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pte(&pte[i], __pte(address | page_flags));
		address += PAGE_SIZE;
	}

	/*
	 * Ideally we need to clear the large PMD first and do a TLB
	 * flush before we write the new PMD. But the 2M range of the
	 * PMD might contain the code we execute and/or the stack
	 * we are on, so we can't do that. That should be safe here,
	 * though, because we are going from large to small mappings
	 * and we are also the only user of the page-table, so there is
	 * no chance of a TLB multihit.
	 */
	pmd = __pmd((unsigned long)pte | info->kernpg_flag);
	set_pmd(pmdp, pmd);
	/* Flush TLB to establish the new PMD */
	write_cr3(top_level_pgt);

	return pte + pte_index(__address);
}
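
/*
 * Note: the pointer returned above is the PTE covering __address inside the
 * freshly populated page table, so the caller can then modify just that one
 * 4K page instead of the whole former 2M mapping.
 */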

static void clflush_page(unsigned long address)
{
	unsigned int flush_size;
	char *cl, *start, *end;

	/*
	 * Hardcode cl-size to 64 - CPUID can't be used here because that might
	 * cause another #VC exception and the GHCB is not ready to use yet.
	 */
	flush_size = 64;
	start = (char *)(address & PAGE_MASK);
	end = start + PAGE_SIZE;

	/*
	 * First make sure there are no pending writes on the cache-lines to
	 * flush.
	 */
	asm volatile("mfence" : : : "memory");

	for (cl = start; cl != end; cl += flush_size)
		clflush(cl);
}
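
/*
 * Flushing matters here because, with SME/SEV, cache lines are tagged with
 * the encryption bit: lines established under the old C-bit setting could
 * otherwise alias the page after its encryption attribute is changed.
 */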

static int set_clr_page_flags(struct x86_mapping_info *info,
			      unsigned long address,
			      pteval_t set, pteval_t clr)
{
	pgd_t *pgdp = (pgd_t *)top_level_pgt;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;

	/*
	 * First make sure there is a PMD mapping for 'address'.
	 * It should already exist, but keep things generic.
	 *
	 * To map the page just read from it and fault it in if there is no
	 * mapping yet. add_identity_map() can't be called here because that
	 * would unconditionally map the address on PMD level, destroying any
	 * PTE-level mappings that might already exist. Use assembly here so
	 * the access won't be optimized away.
	 */
	asm volatile("mov %[address], %%r9"
		     :: [address] "g" (*(unsigned long *)address)
		     : "r9", "memory");

	/*
	 * The page is mapped at least with PMD size - so skip checks and walk
	 * directly to the PMD.
	 */
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);

	if (pmd_large(*pmdp))
		ptep = split_large_pmd(info, pmdp, address);
	else
		ptep = pte_offset_kernel(pmdp, address);

	if (!ptep)
		return -ENOMEM;

	/*
	 * Changing encryption attributes of a page requires flushing it from
	 * the caches.
	 */
	if ((set | clr) & _PAGE_ENC)
		clflush_page(address);

	/* Update PTE */
	pte = *ptep;
	pte = pte_set_flags(pte, set);
	pte = pte_clear_flags(pte, clr);
	set_pte(ptep, pte);

	/* Flush TLB after changing encryption attribute */
	write_cr3(top_level_pgt);
	return 0;
}

int set_page_decrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}

int set_page_encrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}

int set_page_non_present(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}
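
/*
 * Usage sketch (illustrative, not part of this file): callers such as the
 * boot-stage SEV-ES code flip the encryption bit of a page they need to
 * share with the hypervisor, roughly:
 *
 *	if (set_page_decrypted((unsigned long)&ghcb_page))
 *		error("Failed to map GHCB page decrypted\n");
 *
 * where 'ghcb_page' stands in for the caller's GHCB buffer.
 */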

static void do_pf_error(const char *msg, unsigned long error_code,
			unsigned long address, unsigned long ip)
{
	error_putstr(msg);

	error_putstr("\nError Code: ");
	error_puthex(error_code);
	error_putstr("\nCR2: 0x");
	error_puthex(address);
	error_putstr("\nRIP relative to _head: 0x");
	error_puthex(ip - (unsigned long)_head);
	error_putstr("\n");

	error("Stopping.\n");
}

void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = native_read_cr2();
	unsigned long end;
	bool ghcb_fault;

	ghcb_fault = sev_es_check_ghcb_fault(address);

	address &= PMD_MASK;
	end = address + PMD_SIZE;

	/*
	 * Check for unexpected error codes. Unexpected are:
	 *	- Faults on present pages
	 *	- User faults
	 *	- Reserved bits set
	 */
	if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
		do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
	else if (ghcb_fault)
		do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);

	/*
	 * Error code is sane - now identity map the 2M region around
	 * the faulting address.
	 */
	add_identity_map(address, end);
}
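
/*
 * This handler is reached through the early boot IDT (see the compressed
 * kernel's IDT setup code), whose #PF entry points at an asm stub that calls
 * do_boot_page_fault(); faults on not-yet-mapped addresses are thus resolved
 * by extending the identity map on the fly.
 */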