/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
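
/*
 * Worked example (illustrative note, not from the original source):
 * l4_index() extracts bits 47:39 of a virtual address, i.e. the PGD slot
 * with 4-level paging. For __START_KERNEL_map == 0xffffffff80000000 this
 * yields 511, the last PGD entry; the "= 511" comments on the page
 * tables below rely on exactly this arithmetic.
 */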
	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity-mapped page table
	 * for us. These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64-bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15
	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/* Setup GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	call	startup_64_setup_env
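
	/*
	 * Sketch (illustrative only, not from the original source): the MSR
	 * write above is the assembly form of roughly
	 *	wrmsrl(MSR_GS_BASE, (unsigned long)&fixed_percpu_data);
	 * WRMSR takes its 64-bit value split across EDX:EAX, hence the
	 * mov/shr pair before it.
	 */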
	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK
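
	/*
	 * Note (added for clarity): a far return pops RIP and then CS, so
	 * pushing __KERNEL_CS followed by the target address and executing
	 * LRETQ above reloads %cs without needing a far jump.
	 */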
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif
	/* Sanitize CPU configuration */
	call	verify_cpu
	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64
	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	mov	%rax, %rdi
	mov	%rax, %r14

	addq	phys_base(%rip), %rdi
	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	call	sev_verify_cbit

	/*
	 * Restore the CR3 value without the phys_base offset, which will
	 * be added below, just before writing %cr3.
	 */
	mov	%r14, %rax
#endif

	jmp	1f
SYM_CODE_END(startup_64)
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	/* Sanitize CPU configuration */
	call	verify_cpu
	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15 which holds the boot_params pointer on the boot CPU */
	xorl	%r15d, %r15d
	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif
	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax

1:
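	/*
	 * Illustrative note (added): the kernel image is linked at
	 * __START_KERNEL_map-based addresses, so "symbol - __START_KERNEL_map"
	 * is the symbol's offset into the image. Adding phys_base below
	 * turns that offset into a physical address suitable for CR3.
	 */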
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value
	 * set here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif
	/* Enable PAE mode, PSE, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PSE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testb	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4
	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
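
	/*
	 * Sketch (illustrative, not the kernel's literal code): %rax now
	 * holds roughly
	 *	__pa(top_pgt) + sme_me_mask
	 * i.e. the page table's physical address plus the SME C-bit
	 * modifier, which is exactly what gets written into CR3 below.
	 */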
	/*
	 * Switch to the new page-table.
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3
	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4
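
	/*
	 * Note (added for clarity): writing CR4 with PGE toggled and then
	 * restoring the original value flushes all TLB entries, including
	 * global ones - similar to the CR4-based path C code takes in
	 * native_flush_tlb_global().
	 */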
	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR // above
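
	/*
	 * Illustrative note (added): "movq $1f, %rax" uses the label's
	 * link-time (virtual) address, so the indirect jump above is what
	 * moves RIP from the identity mapping onto the kernel's high
	 * virtual mapping.
	 */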
#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number. For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bit 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid

	/*
	 * No control bit set, single CPU bringup. CPU number is provided
	 * in bit 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr
#ifdef CONFIG_X86_X2APIC
	/*
	 * If the system is in X2APIC mode then the MMIO base might not be
	 * mapped, causing the MMIO read below to fault. Faults can't be
	 * handled at that point.
	 */
	cmpl	$0, x2apic_mode(%rip)
	jz	.Lread_apicid_mmio

	/* Force the AP into X2APIC mode. */
	orl	$X2APIC_ENABLE, %eax
	wrmsr
	jmp	.Lread_apicid_msr
#endif
.Lread_apicid_mmio:
	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shrl	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorl	%ecx, %ecx
	leaq	cpuid_to_apicid(%rip), %rbx

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	jmp	.Lfind_cpunr
#else
	cmpl	nr_cpu_ids(%rip), %ecx
	jb	.Lfind_cpunr
#endif
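
	/*
	 * Sketch in C (illustrative, not the kernel's code): the APIC ID
	 * lookup above is essentially
	 *
	 *	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
	 *		if (cpuid_to_apicid[cpu] == apicid)
	 *			goto setup_cpu;
	 *	// not found: drop the trampoline lock and halt (below)
	 */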
	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b
.Lsetup_cpu:
	/* Get the per-CPU offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx	/* zero-extend to clear all of RDX */
#endif /* CONFIG_SMP */
	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp
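
	/*
	 * Illustrative C equivalent (added for clarity): %rsp is loaded from
	 * per_cpu(pcpu_hot.current_task, cpu)->thread.sp, using the per-CPU
	 * offset in %rdx.
	 */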
	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp
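
	/*
	 * Note (added for clarity): the operand built on the stack above is
	 * the 10-byte descriptor format LGDT expects - a 16-bit limit
	 * (GDT_SIZE-1) followed by a 64-bit linear base address at offset 2.
	 */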
	/* set up data segments */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs
	/*
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE, %ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr
	/* Setup and Load IDT */
	call	early_setup_idt
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi
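
	/*
	 * Note (added for clarity): CPUID leaf 0x80000001 reports NX
	 * (No-Execute) support in EDX bit 20, which is what the
	 * "btl $20, %edi" test below checks.
	 */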
	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX, early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xorl	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0
	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/* Pass the boot_params pointer as first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64-bit address; this is only possible as an indirect
	 * jump. In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
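
	/*
	 * Illustrative stack picture at the LRETQ below (top of stack
	 * first): the target RIP from initial_code, then __KERNEL_CS, then
	 * .Lafter_lret as a fake return address for the unwinder.
	 */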
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)
#include "verify_cpu.S"
#include "sev_verify_cbit.S"
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS
	/* Remove Error Code */
	addq	$8, %rsp
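
	/*
	 * Note (added for clarity): the ADDQ above drops the error code the
	 * CPU pushed for the #VC exception, leaving the bare
	 * RIP/CS/RFLAGS/RSP/SS frame that IRETQ expects.
	 */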
	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif
	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA
	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
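
/*
 * Note (added for clarity): each stub above is padded with 0xcc (INT3)
 * up to EARLY_IDT_HANDLER_SIZE bytes, so the stub for vector i sits at
 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE and can be
 * installed into the IDT by simple arithmetic.
 */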
SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)
	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
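
	/*
	 * Illustrative note (added): the pushes above complete a struct
	 * pt_regs on the stack - the hardware frame sits at the highest
	 * addresses and r15 at the lowest - so %rsp can be handed to C
	 * below as a struct pt_regs pointer.
	 */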
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif
#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned. We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif
/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
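
/*
 * Illustrative expansion (added for clarity): PMDS(0, perm, 3) emits
 * three .quad entries covering 0, 2 MiB and 4 MiB - one 2 MiB PMD
 * mapping per entry, since PMD_SHIFT is 21.
 */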
	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)
	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled. But the CPU should
	 * ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif
#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif
SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
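
/*
 * Illustrative arithmetic (added for clarity): with 2 MiB PMD entries,
 * KERNEL_IMAGE_SIZE/PMD_SIZE above is 512 entries for the 1 GiB
 * (RANDOMIZE_BASE) case and 256 entries for the 512 MiB case.
 */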
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)
SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)
#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,		.long 0)
	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)