arch/x86/kernel/head64.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
#include <linux/pgtable.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev-es.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
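/*
 * Flags for the early 2M mappings. _PAGE_GLOBAL stays clear because
 * CR4.PGE is not enabled this early, and _PAGE_NX stays clear because
 * EFER.NX may not be set up yet. sme_early_init() may later OR in the
 * memory encryption mask (see the comment in x86_64_start_kernel()).
 */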
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

/*
 * GDT used on the boot CPU before switching to virtual addresses.
 */
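/*
 * The access/flag bytes encode present, DPL-0, 4K-granular segments:
 * 0xc09b is 32-bit code (D bit), 0xa09b is 64-bit code (L bit), and
 * 0xc093 is writable data.
 */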
static struct desc_struct startup_gdt[GDT_ENTRIES] = {
        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
};

/*
 * Address needs to be set at runtime because it references the startup_gdt
 * while the kernel still uses a direct mapping.
 */
static struct desc_ptr startup_gdt_descr = {
        .size = sizeof(startup_gdt),
        .address = 0,
};

#define __head  __section(".head.text")

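/*
 * Translate a link-time address into the physical address it currently
 * occupies: the kernel is linked to run at __START_KERNEL_map but is
 * executing from physaddr, so e.g. &phys_base is reachable at
 * physaddr + ((void *)&phys_base - (void *)_text).
 */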
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
        return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
        /*
         * 5-level paging is detected and enabled at the kernel decompression
         * stage. Only check whether it has been enabled there.
         */
        if (!(native_read_cr4() & X86_CR4_LA57))
                return false;

        *fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
        *fixup_int(&pgdir_shift, physaddr) = 48;
        *fixup_int(&ptrs_per_p4d, physaddr) = 512;
        *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
        *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
        *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

        return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
        return false;
}
#endif

/* Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        unsigned long vaddr, vaddr_end;
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        pteval_t *mask_ptr;
        bool la57;
        int i;
        unsigned int *next_pgt_ptr;

        la57 = check_la57_support(physaddr);

        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_PAGE_MASK)
                for (;;);

        /* Activate Secure Memory Encryption (SME) if supported and enabled */
        sme_enable(bp);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();
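        /*
         * From here on, adding load_delta to a page-table entry both
         * relocates it and, when SME is active, sets the encryption bit.
         */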

        /* Fixup the physical addresses in the page table */

        pgd = fixup_pointer(&early_top_pgt, physaddr);
        p = pgd + pgd_index(__START_KERNEL_map);
        if (la57)
                *p = (unsigned long)level4_kernel_pgt;
        else
                *p = (unsigned long)level3_kernel_pgt;
        *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

        if (la57) {
                p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
                p4d[511] += load_delta;
        }

        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
        pud[510] += load_delta;
        pud[511] += load_delta;

        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
                pmd[i] += load_delta;

        /*
         * Set up the identity mapping for the switchover.  These
         * entries should *NOT* have the global bit set!  This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

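        /*
         * Two consecutive entries are written at each level below so that
         * the image stays fully covered even when it straddles a
         * PUD/P4D/PGDIR boundary.
         */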
        next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
        pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

        if (la57) {
                p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
                                    physaddr);

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = physaddr >> P4D_SHIFT;
                p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
                p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }

        i = physaddr >> PUD_SHIFT;
        pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
        pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry &= *mask_ptr;
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

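        /* Map every 2M chunk of [_text, _end) at its new physical home */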
        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT);

                pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
        }

        /*
         * Fixup the kernel text+data virtual addresses. Note that
         * we might write invalid pmds; when the kernel is relocated,
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
         *
         * Only the region occupied by the kernel image has so far
         * been checked against the table of usable memory regions
         * provided by the firmware, so invalidate pages outside that
         * region. A page table entry that maps to a reserved area of
         * memory would allow processor speculation into that area,
         * and on some hardware (particularly the UV platform) even
         * speculative access to some reserved areas is caught as an
         * error, causing the BIOS to halt the system.
         */

        pmd = fixup_pointer(level2_kernel_pgt, physaddr);

        /* invalidate pages before the kernel image */
        for (i = 0; i < pmd_index((unsigned long)_text); i++)
                pmd[i] &= ~_PAGE_PRESENT;

        /* fixup pages that are part of the kernel image */
        for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;

        /* invalidate pages after the kernel image */
        for (; i < PTRS_PER_PMD; i++)
                pmd[i] &= ~_PAGE_PRESENT;

        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
         * the true physical address.
         */
        *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

        /* Encrypt the kernel and related (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Clear the memory encryption mask from the .bss..decrypted section.
         * The bss section will be memset to zero later in the initialization so
         * there is no need to zero it after changing the memory encryption
         * attribute.
         */
        if (mem_encrypt_active()) {
                vaddr = (unsigned long)__start_bss_decrypted;
                vaddr_end = (unsigned long)__end_bss_decrypted;
                for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
                        i = pmd_index(vaddr);
                        pmd[i] -= sme_get_me_mask();
                }
        }

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
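        /* Clear all but the last slot, which holds the kernel's high mapping */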
        memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address, or are the early page tables already done? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return false;

again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!pgtable_l5_enabled())
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;

        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return true;
}

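/*
 * Map the 2M page covering @address in the direct mapping, on demand
 * from the early page-fault handler.
 */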
static bool __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}

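/*
 * Common early exception dispatcher: build missing direct-map entries
 * on #PF, handle #VC for SEV-ES guests, and fall back to the exception
 * fixup tables for everything else.
 */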
void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
        if (trapnr == X86_TRAP_PF &&
            early_make_pgtable(native_read_cr2()))
                return;

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
            trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
                return;

        early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

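/*
 * The 64-bit command-line pointer is split across two boot_params
 * fields: hdr.cmd_line_ptr holds the low 32 bits and ext_cmd_line_ptr
 * the high 32 bits.
 */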
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system. If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        idt_setup_early_handler();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* Set init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
        /* version is always non-zero if the boot data has been copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be set up already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

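/* Per the LIDT convention, .size holds the table size in bytes minus one. */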
static struct desc_ptr bringup_idt_descr = {
        .size           = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
        .address        = 0, /* Set at runtime */
};

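/*
 * Compiled out unless CONFIG_AMD_MEM_ENCRYPT is set: the only handler
 * installed in the bringup IDT is the #VC handler for SEV-ES guests.
 */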
static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
        struct idt_data data;
        gate_desc desc;

        init_idt_data(&data, n, handler);
        idt_init_desc(&desc, &data);
        native_write_idt_entry(idt, n, &desc);
#endif
}

/* This runs while still in the direct mapping */
static void startup_64_load_idt(unsigned long physbase)
{
        struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
        gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
                void *handler;

                /* VMM Communication Exception */
                handler = fixup_pointer(vc_no_ghcb, physbase);
                set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
        }

        desc->address = (unsigned long)idt;
        native_load_idt(desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
        /* VMM Communication Exception */
        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
                set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);

        bringup_idt_descr.address = (unsigned long)bringup_idt_table;
        native_load_idt(&bringup_idt_descr);
}

/*
 * Set up boot CPU state needed before the kernel switches to virtual
 * addresses.
 */
void __head startup_64_setup_env(unsigned long physbase)
{
        /* Load GDT */
        startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
        native_load_gdt(&startup_gdt_descr);

        /* New GDT is live - reload data segment registers */
        asm volatile("movl %%eax, %%ds\n"
                     "movl %%eax, %%ss\n"
                     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

        startup_64_load_idt(physbase);
}