/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb. I hope so, as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that either access the BIOS via VM86
 * mode.
 *
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
28 #include <linux/init.h>
29 #include <linux/linkage.h>
30 #include <asm/segment.h>
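#include <asm/boot.h>
#include <asm/msr.h>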
33 #include <asm/processor-flags.h>
34 #include <asm/asm-offsets.h>
35 #include <asm/bootparam.h>
36 #include <asm/desc_defs.h>
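#include "pgtable.h"

/*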
40 * Locally defined symbols should be marked hidden:
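 */
	.hidden _bss
	.hidden _ebss
	.hidden _end

/*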
49 * This macro gives the relative virtual address of X, i.e. the offset of X
50 * from startup_32. This is the same as the link-time virtual address of X,
51 * since startup_32 is at 0, but defining it this way tells the
52 * assembler/linker that we do not want the actual run-time address of X. This
53 * prevents the linker from trying to create unwanted run-time relocation
54 * entries for the reference when the compressed kernel is linked as PIE.
56 * A reference X(%reg) will result in the link-time VA of X being stored with
57 * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
58 * adds the 64-bit base address where the kernel is loaded.
60 * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
61 * and no run-time relocation.
63 * The macro should be used as a displacement with a base register containing
 * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
 * [i.e. $ rva(X)].
 *
67 * This macro can only be used from within the .head.text section, since the
 * expression requires startup_32 to be in the same section as the code being
 * assembled.
 */
71 #define rva(X) ((X) - startup_32)
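
	__HEAD
	.code32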
74 SYM_FUNC_START(startup_32)
	/*
	 * The 32-bit entry point is at offset 0 and is part of the ABI, so it
	 * is immutable!
	 * If we come here directly from a bootloader, the
	 * kernel(text+data+bss+brk), ramdisk, zero_page and command line
	 * all need to be under the 4G limit.
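	 */
	cld
	cli

	/*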
85 * Calculate the delta between where we were compiled to run
86 * at and where we were actually loaded at. This can only be done
87 * with a short local call on x86. Nothing else will tell us what
88 * address we are running at. The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) is used as the stack
	 * for this calculation. Only 4 bytes are needed.
	 */
92 leal (BP_scratch+4)(%esi), %esp
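	call	1f
1:	popl	%ebp
	subl	$ rva(1b), %ebp
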
	/* Load the new GDT with the 64-bit segments using a 32-bit descriptor */
98 leal rva(gdt)(%ebp), %eax
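	movl	%eax, 2(%eax)		/* Patch the GDT base with its run-time address */
	lgdt	(%eax)
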
102 /* Load segment registers with our descriptors */
103 movl $__BOOT_DS, %eax
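	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss
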
	/* Set up a stack and load CS from the current GDT */
111 leal rva(boot_stack_end)(%ebp), %esp
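
	pushl	$__KERNEL32_CS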
114 leal rva(1f)(%ebp), %eax
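	pushl	%eax
	lretl
1:
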
	/* Set up exception handling for SEV-ES */
120 call startup32_load_idt
122 /* Make sure cpu supports long mode. */
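	call	verify_cpu
	testl	%eax, %eax
	jnz	.Lno_longmode

	/*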
128 * Compute the delta between where we were compiled to run at
129 * and where the code will actually run at.
131 * %ebp contains the address we are loaded at by the boot loader and %ebx
132 * contains the address where we should move the kernel image temporarily
133 * for safe in-place decompression.
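	 */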
136 #ifdef CONFIG_RELOCATABLE
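	movl	%ebp, %ebx
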
139 #ifdef CONFIG_EFI_STUB
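/*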
141 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
142 * offset to the start of the space allocated for the image. efi_pe_entry will
143 * set up image_offset to tell us where the image actually starts, so that we
144 * can use the full available buffer.
145 * image_offset = startup_32 - image_base
146 * Otherwise image_offset will be zero and has no effect on the calculations.
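 */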
148 subl rva(image_offset)(%ebp), %ebx
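#endif
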
151 movl BP_kernel_alignment(%esi), %eax
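	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx		/* Round %ebx up to the kernel alignment */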
156 cmpl $LOAD_PHYSICAL_ADDR, %ebx
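	jae	1f
#endif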
159 movl $LOAD_PHYSICAL_ADDR, %ebx
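1:
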
162 /* Target address to relocate to for decompression */
163 addl BP_init_size(%esi), %ebx
164 subl $ rva(_end), %ebx
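
	/*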
167 * Prepare for entering 64 bit mode
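	 */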
170 /* Enable PAE mode */
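	movl	%cr4, %eax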
172 orl $X86_CR4_PAE, %eax
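	movl	%eax, %cr4

	/*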
176 * Build early 4G boot pagetable
179 * If SEV is active then set the encryption mask in the page tables.
	 * This will ensure that when the kernel is copied and decompressed
181 * it will be done so encrypted.
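	 */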
183 call get_sev_encryption_bit
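	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f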
187 subl $32, %eax /* Encryption bit is always above bit 31 */
188 bts %eax, %edx /* Set encryption mask for page tables */
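1:
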
191 /* Initialize Page tables to 0 */
192 leal rva(pgtable)(%ebx), %edi
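	xorl	%eax, %eax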
194 movl $(BOOT_INIT_PGT_SIZE/4), %ecx
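	rep	stosl

	/* Build Level 4 */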
198 leal rva(pgtable + 0)(%ebx), %edi
	leal	0x1007(%edi), %eax	/* next table + PRESENT, RW and USER flags */
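	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)		/* Apply the encryption mask */

	/* Build Level 3 */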
204 leal rva(pgtable + 0x1000)(%ebx), %edi
205 leal 0x1007(%edi), %eax
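	movl	$4, %ecx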
207 1: movl %eax, 0x00(%edi)
208 addl %edx, 0x04(%edi)
209 addl $0x00001000, %eax
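	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2 */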
215 leal rva(pgtable + 0x2000)(%ebx), %edi
	movl	$0x00000183, %eax	/* 2M entries: PRESENT + RW + PS + GLOBAL */
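	movl	$2048, %ecx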
218 1: movl %eax, 0(%edi)
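	addl	%edx, 4(%edi)		/* Apply the encryption mask */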
220 addl $0x00200000, %eax
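	addl	$8, %edi
	decl	%ecx
	jnz	1b
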
225 /* Enable the boot page tables */
226 leal rva(pgtable)(%ebx), %eax
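	movl	%eax, %cr3
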
229 /* Enable Long mode in EFER (Extended Feature Enable Register) */
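	movl	$MSR_EFER, %ecx
	rdmsr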
232 btsl $_EFER_LME, %eax
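	wrmsr
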
235 /* After gdt is loaded */
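	xorl	%eax, %eax
	lldt	%ax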
238 movl $__BOOT_TSS, %eax
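	ltr	%ax

	/*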
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
251 leal rva(startup_64)(%ebp), %eax
252 #ifdef CONFIG_EFI_MIXED
253 movl rva(efi32_boot_args)(%ebp), %edi
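	testl	%edi, %edi
	jz	1f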
256 leal rva(efi64_stub_entry)(%ebp), %eax
257 movl rva(efi32_boot_args+4)(%ebp), %esi
258 movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer
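	testl	%edx, %edx
	jnz	1f
	/*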
262 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
263 * shadow space on the stack even if all arguments are passed in
264 * registers. We also need an additional 8 bytes for the space that
265 * would be occupied by the return address, and this also results in
266 * the correct stack alignment for entry.
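	 */
	subl	$40, %esp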
269 leal rva(efi_pe_entry)(%ebp), %eax
270 movl %edi, %ecx // MS calling convention
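	movl	%esi, %edx			// MS calling convention
1:
#endif

	pushl	$__KERNEL_CS
	pushl	%eax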
277 /* Enter paged protected Mode, activating Long Mode */
278 movl $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
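	movl	%eax, %cr0
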
281 /* Jump from 32bit compatibility mode into 64bit mode. */
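	lret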
283 SYM_FUNC_END(startup_32)
285 #ifdef CONFIG_EFI_MIXED
287 SYM_FUNC_START(efi32_stub_entry)
288 add $0x4, %esp /* Discard return address */
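	popl	%ecx
	popl	%edx
	popl	%esi

	call	1f
1:	pop	%ebp
	subl	$ rva(1b), %ebp
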
297 movl %esi, rva(efi32_boot_args+8)(%ebp)
298 SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
299 movl %ecx, rva(efi32_boot_args)(%ebp)
300 movl %edx, rva(efi32_boot_args+4)(%ebp)
301 movb $0, rva(efi_is64)(%ebp)
303 /* Save firmware GDTR and code/data selectors */
304 sgdtl rva(efi32_boot_gdt)(%ebp)
305 movw %cs, rva(efi32_boot_cs)(%ebp)
306 movw %ds, rva(efi32_boot_ds)(%ebp)
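
	/* Disable paging */
	movl	%cr0, %eax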
310 btrl $X86_CR0_PG_BIT, %eax
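	movl	%eax, %cr0

	jmp	startup_32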
314 SYM_FUNC_END(efi32_stub_entry)
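#endif

	.code64
	.org 0x200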
319 SYM_CODE_START(startup_64)
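	/*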
	 * 64-bit entry is at offset 0x200 and is part of the ABI, so it is
	 * immutable!
	 * We come here either from startup_32 or directly from a
	 * 64-bit bootloader.
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */

	cld
	cli
334 /* Setup data segments. */
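	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/*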
	 * Compute the decompressed kernel start address. It is the address
	 * we were loaded at, aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to a 2MB boundary, otherwise
	 * decompress and run the kernel from LOAD_PHYSICAL_ADDR.
	 *
351 * We cannot rely on the calculation done in 32-bit mode, since we
352 * may have been invoked via the 64-bit entry point.
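	 */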
355 /* Start with the delta to where the kernel will run at. */
356 #ifdef CONFIG_RELOCATABLE
357 leaq startup_32(%rip) /* - $startup_32 */, %rbp
359 #ifdef CONFIG_EFI_STUB
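/*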
361 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
362 * offset to the start of the space allocated for the image. efi_pe_entry will
363 * set up image_offset to tell us where the image actually starts, so that we
364 * can use the full available buffer.
365 * image_offset = startup_32 - image_base
366 * Otherwise image_offset will be zero and has no effect on the calculations.
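 */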
368 movl image_offset(%rip), %eax
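	subq	%rax, %rbp
#endif
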
372 movl BP_kernel_alignment(%rsi), %eax
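	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp		/* Round %rbp up to the kernel alignment */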
377 cmpq $LOAD_PHYSICAL_ADDR, %rbp
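	jae	1f
#endif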
380 movq $LOAD_PHYSICAL_ADDR, %rbp
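1:
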
383 /* Target address to relocate to for decompression */
384 movl BP_init_size(%rsi), %ebx
385 subl $ rva(_end), %ebx
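	addq	%rbp, %rbx
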
388 /* Set up the stack */
389 leaq rva(boot_stack_end)(%rbx), %rsp
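
	/*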
392 * At this point we are in long mode with 4-level paging enabled,
393 * but we might want to enable 5-level paging or vice versa.
395 * The problem is that we cannot do it directly. Setting or clearing
396 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
397 * long mode and paging first.
399 * We also need a trampoline in lower memory to switch over from
400 * 4- to 5-level paging for cases when the bootloader puts the kernel
401 * above 4G, but didn't enable 5-level paging for us.
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, e.g. when starting a 4-level paging kernel via kexec() from a
	 * kernel that ran in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 *
	 * We go through the trampoline even if we don't have to: if we're
	 * already in the desired paging mode. This way the trampoline code
	 * gets tested on every boot.
	 */
416 /* Make sure we have GDT with 32-bit code segment */
417 leaq gdt64(%rip), %rax
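	addq	%rax, 2(%rax)		/* Patch the GDT base with its run-time address */
	lgdt	(%rax)
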
421 /* Reload CS so IRET returns to a CS actually in the GDT */
423 leaq .Lon_kernel_cs(%rip), %rax
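	pushq	$__KERNEL_CS
	pushq	%rax
	lretq

.Lon_kernel_cs:

	/* Load the stage1 IDT; RSI (boot_params) must survive the call */
	pushq	%rsi
	call	load_stage1_idt
	popq	%rsi

	/*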
434 * paging_prepare() sets up the trampoline and checks if we need to
435 * enable 5-level paging.
	 * paging_prepare() returns a two-quadword structure which lands
	 * into RDX:RAX:
	 *   - Address of the trampoline is returned in RAX.
	 *   - Non zero RDX means trampoline needs to enable 5-level
	 *     paging.
	 *
443 * RSI holds real mode data and needs to be preserved across
444 * this function call.
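	 */
	pushq	%rsi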
447 movq %rsi, %rdi /* real mode address */
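	call	paging_prepare
	popq	%rsi
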
451 /* Save the trampoline address in RCX */
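	movq	%rax, %rcx

	/*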
455 * Load the address of trampoline_return() into RDI.
456 * It will be used by the trampoline to return to the main code.
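	 */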
458 leaq trampoline_return(%rip), %rdi
460 /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
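	pushq	$__KERNEL32_CS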
462 leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
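	pushq	%rax
	lretq
trampoline_return: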
466 /* Restore the stack, the 32-bit trampoline uses its own stack */
467 leaq rva(boot_stack_end)(%rbx), %rsp
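
	/*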
470 * cleanup_trampoline() would restore trampoline memory.
472 * RDI is address of the page table to use instead of page table
473 * in trampoline memory (if required).
475 * RSI holds real mode data and needs to be preserved across
476 * this function call.
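	 */
	pushq	%rsi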
479 leaq rva(top_pgtable)(%rbx), %rdi
480 call cleanup_trampoline
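	popq	%rsi

	/* Zero EFLAGS */
	pushq	$0
	popfq

	/*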
488 * Copy the compressed kernel to the end of our buffer
489 * where decompression in place becomes safe.
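	 */
	pushq	%rsi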
492 leaq (_bss-8)(%rip), %rsi
493 leaq rva(_bss-8)(%rbx), %rdi
494 movl $(_bss - startup_32), %ecx
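	shrq	$3, %rcx
	std				/* Copy backwards: the buffers may overlap */
	rep	movsq
	cld
	popq	%rsi

	/*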
502 * The GDT may get overwritten either during the copy we just did or
503 * during extract_kernel below. To avoid any issues, repoint the GDTR
504 * to the new copy of the GDT.
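	 */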
506 leaq rva(gdt64)(%rbx), %rax
507 leaq rva(gdt)(%rbx), %rdx
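	movq	%rdx, 2(%rax)
	lgdt	(%rax)

	/*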
512 * Jump to the relocated address.
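	 */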
514 leaq rva(.Lrelocated)(%rbx), %rax
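	jmp	*%rax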
516 SYM_CODE_END(startup_64)
518 #ifdef CONFIG_EFI_STUB
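	.org 0x390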
520 SYM_FUNC_START(efi64_stub_entry)
521 SYM_FUNC_START_ALIAS(efi_stub_entry)
522 and $~0xf, %rsp /* realign the stack */
523 movq %rdx, %rbx /* save boot_params pointer */
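	call	efi_main
	movq	%rbx, %rsi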
526 leaq rva(startup_64)(%rax), %rax
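	jmp	*%rax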
528 SYM_FUNC_END(efi64_stub_entry)
529 SYM_FUNC_END_ALIAS(efi_stub_entry)
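#endif

	.text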
533 SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
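
/*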
536 * Clear BSS (stack is currently empty)
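 */
	xorl	%eax, %eax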
539 leaq _bss(%rip), %rdi
540 leaq _ebss(%rip), %rcx
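	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq

/*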
 * If running as an SEV guest, the encryption mask is required in the
 * page-table setup code below. When the guest also has SEV-ES enabled
 * set_sev_encryption_mask() will cause #VC exceptions, but the stage2
 * handler can't map its GHCB because the page-table is not set up yet.
 * So set up the encryption mask here while still on the stage1 #VC
 * handler. Then load stage2 IDT and switch to the kernel's own
 * page-table.
 */
555 call set_sev_encryption_mask
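	call	load_stage2_idt
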
558 /* Pass boot_params to initialize_identity_maps() */
560 call initialize_identity_maps
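
/*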
 * Do the extraction, and jump to the new kernel.
 */
566 pushq %rsi /* Save the real mode argument */
567 movq %rsi, %rdi /* real mode address */
568 leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
569 leaq input_data(%rip), %rdx /* input_data */
570 movl input_len(%rip), %ecx /* input_len */
571 movq %rbp, %r8 /* output target address */
572 movl output_len(%rip), %r9d /* decompressed length, end of relocs */
573 call extract_kernel /* returns kernel location in %rax */
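	popq	%rsi

/*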
577 * Jump to the decompressed kernel.
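 */
	jmp	*%rax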
580 SYM_FUNC_END(.Lrelocated)
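
	.code32
/*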
584 * This is the 32-bit trampoline that will be copied over to low memory.
586 * RDI contains the return address (might be above 4G).
587 * ECX contains the base address of the trampoline memory.
588 * Non zero RDX means trampoline needs to enable 5-level paging.
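 */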
590 SYM_CODE_START(trampoline_32bit_src)
591 /* Set up data and stack segments */
592 movl $__KERNEL_DS, %eax
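	movl	%eax, %ds
	movl	%eax, %ss
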
596 /* Set up new stack */
597 leal TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
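
	/* Disable paging */
	movl	%cr0, %eax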
601 btrl $X86_CR0_PG_BIT, %eax
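	movl	%eax, %cr0
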
604 /* Check what paging mode we want to be in after the trampoline */
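	testl	%edx, %edx
	jz	1f
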
608 /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
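	movl	%cr4, %eax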
610 testl $X86_CR4_LA57, %eax
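	jnz	3f
	jmp	2f
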
614 /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
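1:	movl	%cr4, %eax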
616 testl $X86_CR4_LA57, %eax
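	jz	3f
2: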
619 /* Point CR3 to the trampoline's new top level page table */
620 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
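	movl	%eax, %cr3
3: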
	/* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
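	pushl	%ecx
	pushl	%edx			/* rdmsr clobbers %edx, which holds the paging-mode flag */
	movl	$MSR_EFER, %ecx
	rdmsr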
628 btsl $_EFER_LME, %eax
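	wrmsr
	popl	%edx
	popl	%ecx
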
633 /* Enable PAE and LA57 (if required) paging modes */
634 movl $X86_CR4_PAE, %eax
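	testl	%edx, %edx
	jz	1f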
637 orl $X86_CR4_LA57, %eax
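1:
	movl	%eax, %cr4
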
641 /* Calculate address of paging_enabled() once we are executing in the trampoline */
642 leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
644 /* Prepare the stack for far return to Long Mode */
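	pushl	$__KERNEL_CS
	pushl	%eax
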
648 /* Enable paging again */
649 movl $(X86_CR0_PG | X86_CR0_PE), %eax
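	movl	%eax, %cr0

	lret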
653 SYM_CODE_END(trampoline_32bit_src)
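
	.code64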
656 SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
657 /* Return from the trampoline */
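	jmp	*%rdi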
659 SYM_FUNC_END(.Lpaging_enabled)
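
	/*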
662 * The trampoline code has a size limit.
663 * Make sure we fail to compile if the trampoline code grows
664 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
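	 */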
666 .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
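
	.code32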
669 SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
670 /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
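1:
	hlt
	jmp	1b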
674 SYM_FUNC_END(.Lno_longmode)
676 #include "../../kernel/verify_cpu.S"
679 SYM_DATA_START_LOCAL(gdt64)
680 .word gdt_end - gdt - 1
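	.quad	gdt - gdt64
SYM_DATA_END(gdt64)

	.balign	8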
684 SYM_DATA_START_LOCAL(gdt)
685 .word gdt_end - gdt - 1
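	.long	gdt
	.word	0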
688 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
689 .quad 0x00af9a000000ffff /* __KERNEL_CS */
690 .quad 0x00cf92000000ffff /* __KERNEL_DS */
691 .quad 0x0080890000000000 /* TS descriptor */
692 .quad 0x0000000000000000 /* TS continued */
693 SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
695 SYM_DATA_START(boot_idt_desc)
696 .word boot_idt_end - boot_idt - 1
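	.quad	0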
698 SYM_DATA_END(boot_idt_desc)
700 SYM_DATA_START(boot_idt)
701 .rept BOOT_IDT_ENTRIES
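	.quad	0
	.quad	0
	.endr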
705 SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
707 #ifdef CONFIG_AMD_MEM_ENCRYPT
708 SYM_DATA_START(boot32_idt_desc)
709 .word boot32_idt_end - boot32_idt - 1
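	.long	0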
711 SYM_DATA_END(boot32_idt_desc)
713 SYM_DATA_START(boot32_idt)
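	.rept	32
	.quad	0
	.endr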
717 SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
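#endif
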
720 #ifdef CONFIG_EFI_STUB
721 SYM_DATA(image_offset, .long 0)
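#endif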
723 #ifdef CONFIG_EFI_MIXED
724 SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
725 SYM_DATA(efi_is64, .byte 1)
727 #define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
728 #define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
729 #define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
733 SYM_FUNC_START(efi32_pe_entry)
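/*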
735 * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
736 * efi_system_table_32_t *sys_table)
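 */

	pushl	%ebp
	movl	%esp, %ebp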
741 pushl %eax // dummy push to allocate loaded_image
743 pushl %ebx // save callee-save registers
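	pushl	%edi
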
746 call verify_cpu // check for long mode support
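	testl	%eax, %eax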
748 movl $0x80000003, %eax // EFI_UNSUPPORTED
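	jnz	2f

	call	1f
1:	pop	%ebx
	subl	$ rva(1b), %ebx
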
755 /* Get the loaded image protocol pointer from the image handle */
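	leal	-4(%ebp), %eax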
757 pushl %eax // &loaded_image
758 leal rva(loaded_image_proto)(%ebx), %eax
759 pushl %eax // pass the GUID address
760 pushl 8(%ebp) // pass the image handle
	/*
	 * Note the alignment of the stack frame.
	 *   sys_table
	 *   handle             <-- 16-byte aligned on entry by ABI
	 *   return address
	 *   frame pointer
	 *   loaded_image       <-- local variable
	 *   saved %ebx         <-- 16-byte aligned here
	 *   saved %edi
	 *   &loaded_image
	 *   &loaded_image_proto
	 *   handle             <-- 16-byte aligned for call to handle_protocol
	 */
776 movl 12(%ebp), %eax // sys_table
777 movl ST32_boottime(%eax), %eax // sys_table->boottime
778 call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
779 addl $12, %esp // restore argument space
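	testl	%eax, %eax
	jnz	2f
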
783 movl 8(%ebp), %ecx // image_handle
784 movl 12(%ebp), %edx // sys_table
785 movl -4(%ebp), %esi // loaded_image
786 movl LI32_image_base(%esi), %esi // loaded_image->image_base
787 movl %ebx, %ebp // startup_32 for efi32_pe_stub_entry
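	/*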
789 * We need to set the image_offset variable here since startup_32() will
790 * use it before we get to the 64-bit efi_pe_entry() in C code.
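	 */
	subl	%esi, %ebx		// image_offset = startup_32 - image_base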
793 movl %ebx, rva(image_offset)(%ebp) // save image_offset
794 jmp efi32_pe_stub_entry
796 2: popl %edi // restore callee-save registers
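	popl	%ebx
	leave
	ret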
800 SYM_FUNC_END(efi32_pe_entry)
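
	.section ".rodata"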
803 /* EFI loaded image protocol GUID */
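	.balign 4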
805 SYM_DATA_START_LOCAL(loaded_image_proto)
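	.long	0x5b1b31a1
	.word	0x9562, 0x11d2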
808 .byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
809 SYM_DATA_END(loaded_image_proto)
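#endif
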
812 #ifdef CONFIG_AMD_MEM_ENCRYPT
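
/*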
816 * Write an IDT entry into boot32_idt
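 *
 * Parameters:
 *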
820 * %eax: Handler address
821 * %edx: Vector number
823 * Physical offset is expected in %ebp
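 */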
825 SYM_FUNC_START(startup32_set_idt_entry)
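	push	%ebx
	push	%ecx
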
829 /* IDT entry address to %ebx */
830 leal rva(boot32_idt)(%ebp), %ebx
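	shl	$3, %edx		/* Each IDT entry is 8 bytes */
	addl	%edx, %ebx
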
834 /* Build IDT entry, lower 4 bytes */
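	movl	%eax, %edx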
836 andl $0x0000ffff, %edx # Target code segment offset [15:0]
837 movl $__KERNEL32_CS, %ecx # Target code segment selector
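	shl	$16, %ecx		# Selector is the high 16 bits of the entry
	orl	%ecx, %edx		# Combine selector and offset
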
841 /* Store lower 4 bytes to IDT */
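	movl	%edx, (%ebx)
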
844 /* Build IDT entry, upper 4 bytes */
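	movl	%eax, %edx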
846 andl $0xffff0000, %edx # Target code segment offset [31:16]
847 orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate
849 /* Store upper 4 bytes to IDT */
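	movl	%edx, 4(%ebx)

	pop	%ecx
	pop	%ebx
	ret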
855 SYM_FUNC_END(startup32_set_idt_entry)
858 SYM_FUNC_START(startup32_load_idt)
859 #ifdef CONFIG_AMD_MEM_ENCRYPT
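	/* Load IDT */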
861 leal rva(boot32_idt)(%ebp), %eax
862 movl %eax, rva(boot32_idt_desc+2)(%ebp)
863 lidt rva(boot32_idt_desc)(%ebp)
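#endif
	ret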
866 SYM_FUNC_END(startup32_load_idt)
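
/*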
869 * Stack and heap for uncompression
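 */
	.bss
	.balign 4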
873 SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0)
875 SYM_DATA_START_LOCAL(boot_stack)
876 .fill BOOT_STACK_SIZE, 1, 0
878 SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
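
/*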
881 * Space for page tables (not in .bss so not zeroed)
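 */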
883 .section ".pgtable","aw",@nobits
885 SYM_DATA_LOCAL(pgtable, .fill BOOT_PGT_SIZE, 1, 0)
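
/*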
 * The page table is going to be used instead of the page table in
 * trampoline memory.
 */
891 SYM_DATA_LOCAL(top_pgtable, .fill PAGE_SIZE, 1, 0)