/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 *
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc. elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb; I hope so, as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that either access the BIOS via VM86
 * mode.
 *
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
#include "pgtable.h"
/*
 * Locally defined symbols should be marked hidden:
 */
	.hidden _bss
	.hidden _ebss
	.hidden _got
	.hidden _egot
	.hidden _end

	__HEAD
	.code32
SYM_FUNC_START(startup_32)
	/*
	 * 32bit entry is 0 and it is ABI so immutable!
	 * If we come here directly from a bootloader,
	 * kernel(text+data+bss+brk), ramdisk, zero_page, and command line
	 * all need to be under the 4G limit.
	 */
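	/*
	 * The first instructions are elided in this excerpt; a minimal
	 * sketch of what runs here: clear the direction flag and mask
	 * interrupts before touching any memory.
	 */
	cld
	cli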
	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at. This can only be done
	 * with a short local call on x86. Nothing else will tell us what
	 * address we are running at. The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) is used as the stack
	 * for this calculation. Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
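	/*
	 * Sketch of the elided call/pop pair: CALL pushes the return
	 * address (our runtime location), POP retrieves it, and
	 * subtracting the link-time address of the label leaves the
	 * load delta in %ebp.
	 */
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp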
	/* Load new GDT with the 64bit segments using 32bit descriptor */
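	/*
	 * Sketch of the elided sequence: patch the descriptor's base
	 * field with our runtime address, then load it.
	 */
	leal	gdt(%ebp), %eax
	movl	%eax, 2(%eax)
	lgdt	(%eax)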
	/* Load segment registers with our descriptors */
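	/* Sketch of the elided reloads: point every selector at __BOOT_DS. */
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss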
	/* Set up a stack and make sure the CPU supports long mode. */
	leal	boot_stack_end(%ebp), %esp
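	/*
	 * Sketch of the elided check: verify_cpu (included near the end
	 * of this file) returns non-zero in %eax if the CPU cannot enter
	 * 64-bit mode, in which case we hang in .Lno_longmode.
	 */
	call	verify_cpu
	testl	%eax, %eax
	jnz	.Lno_longmode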
	/*
	 * Compute the delta between where we were compiled to run at
	 * and where the code will actually run at.
	 *
	 * %ebp contains the address we are loaded at by the boot loader and %ebx
	 * contains the address where we should move the kernel image temporarily
	 * for safe in-place decompression.
	 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
#ifdef CONFIG_EFI_STUB
/*
 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
 * offset to the start of the space allocated for the image. efi_pe_entry will
 * set up image_offset to tell us where the image actually starts, so that we
 * can use the full available buffer.
 *	image_offset = startup_32 - image_base
 * Otherwise image_offset will be zero and has no effect on the calculations.
 */
	subl	image_offset(%ebp), %ebx
#endif
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jae	1f
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:
	/* Target address to relocate to for decompression */
	addl	BP_init_size(%esi), %ebx
	subl	$_end, %ebx
/*
 * Prepare for entering 64 bit mode
 */
	/* Enable PAE mode */
	movl	%cr4, %eax
	orl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4
	/*
	 * Build early 4G boot pagetable
	 */

	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This will ensure that when the kernel is copied and decompressed
	 * it will be done so encrypted.
	 */
	call	get_sev_encryption_bit
	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
	bts	%eax, %edx	/* Set encryption mask for page tables */
1:
	/* Initialize Page tables to 0 */
	leal	pgtable(%ebx), %edi
	xorl	%eax, %eax
	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
	rep	stosl
	/* Build Level 4 */
	leal	pgtable + 0(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)
	/* Build Level 3 */
	leal	pgtable + 0x1000(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	%edx, 0x04(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b
	/* Build Level 2 */
	leal	pgtable + 0x2000(%ebx), %edi
	movl	$0x00000183, %eax
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)
	addl	$0x00200000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b
	/* Enable the boot page tables */
	leal	pgtable(%ebx), %eax
	movl	%eax, %cr3
	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr
	/* After gdt is loaded */
	xorl	%eax, %eax
	lldt	%ax
	movl	$__BOOT_TSS, %eax
	ltr	%ax
	/*
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	pushl	$__KERNEL_CS
	leal	startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
	movl	efi32_boot_args(%ebp), %edi
	cmp	$0, %edi
	jz	1f
	leal	efi64_stub_entry(%ebp), %eax
	movl	efi32_boot_args+4(%ebp), %esi
	movl	efi32_boot_args+8(%ebp), %edx	// saved bootparams pointer
	cmpl	$0, %edx
	jnz	1f
	/*
	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
	 * shadow space on the stack even if all arguments are passed in
	 * registers. We also need an additional 8 bytes for the space that
	 * would be occupied by the return address, and this also results in
	 * the correct stack alignment for entry.
	 */
	subl	$40, %esp
	leal	efi_pe_entry(%ebp), %eax
	movl	%edi, %ecx			// MS calling convention
	movl	%esi, %edx
1:
#endif
	pushl	%eax
	/* Enter paged protected Mode, activating Long Mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
	movl	%eax, %cr0

	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret
SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_MIXED
SYM_FUNC_START(efi32_stub_entry)
	add	$0x4, %esp		/* Discard return address */
	popl	%ecx
	popl	%edx
	popl	%esi

	call	1f
1:	pop	%ebp
	subl	$1b, %ebp

	movl	%esi, efi32_boot_args+8(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
	movl	%ecx, efi32_boot_args(%ebp)
	movl	%edx, efi32_boot_args+4(%ebp)
	movb	$0, efi_is64(%ebp)

	/* Save firmware GDTR and code/data selectors */
	sgdtl	efi32_boot_gdt(%ebp)
	movw	%cs, efi32_boot_cs(%ebp)
	movw	%ds, efi32_boot_ds(%ebp)

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	jmp	startup_32
SYM_FUNC_END(efi32_stub_entry)
#endif
	.code64
	.org 0x200
SYM_CODE_START(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
	 * 64bit bootloader.
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */
	/* Setup data segments. */
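	/*
	 * Sketch of the elided reloads: flat 64-bit mode uses NULL
	 * selectors for the data segments.
	 */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs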
	/*
	 * Compute the decompressed kernel start address. It is where
	 * we were loaded at aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to a 2MB boundary, otherwise decompress
	 * and run the kernel from LOAD_PHYSICAL_ADDR.
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */
	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
#ifdef CONFIG_EFI_STUB
/*
 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
 * offset to the start of the space allocated for the image. efi_pe_entry will
 * set up image_offset to tell us where the image actually starts, so that we
 * can use the full available buffer.
 *	image_offset = startup_32 - image_base
 * Otherwise image_offset will be zero and has no effect on the calculations.
 */
	movl	image_offset(%rip), %eax
	subq	%rax, %rbp
#endif
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
	jae	1f
#endif
	movq	$LOAD_PHYSICAL_ADDR, %rbp
1:
	/* Target address to relocate to for decompression */
	movl	BP_init_size(%rsi), %ebx
	subl	$_end, %ebx
	addq	%rbp, %rbx
	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp
	/*
	 * paging_prepare() and cleanup_trampoline() below can have GOT
	 * references. Adjust the table with the address we are running at.
	 *
	 * Zero RAX for adjust_got: the GOT was not adjusted before;
	 * there's no adjustment to undo.
	 */
	/*
	 * Calculate the address the binary is loaded at and use it as
	 * a GOT adjustment.
	 */
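	/*
	 * Sketch of the elided sequence described by the two comments
	 * above (relies on the .Ladjust_got helper defined later in this
	 * file): zero the "undo" argument, compute the runtime load
	 * address with a call/pop pair, then walk the GOT.
	 */
	xorq	%rax, %rax
	call	1f
1:	popq	%rdi
	subq	$1b, %rdi
	call	.Ladjust_got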
	/*
	 * At this point we are in long mode with 4-level paging enabled,
	 * but we might want to enable 5-level paging or vice versa.
	 *
	 * The problem is that we cannot do it directly. Setting or clearing
	 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
	 * long mode and paging first.
	 *
	 * We also need a trampoline in lower memory to switch over from
	 * 4- to 5-level paging for cases when the bootloader puts the kernel
	 * above 4G, but didn't enable 5-level paging for us.
	 *
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, like when starting a 4-level paging kernel via kexec() when the
	 * original kernel worked in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 *
	 * We go through the trampoline even if we don't have to: if we're
	 * already in a desired paging mode. This way the trampoline code gets
	 * tested on every boot.
	 */
	/* Make sure we have GDT with 32-bit code segment */
	leaq	gdt64(%rip), %rax
	addq	%rax, 2(%rax)
	lgdt	(%rax)
	/* Reload CS so IRET returns to a CS actually in the GDT */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	/*
	 * paging_prepare() sets up the trampoline and checks if we need to
	 * enable 5-level paging.
	 *
	 * paging_prepare() returns a two-quadword structure which lands
	 * into RDX:RAX:
	 *   - Address of the trampoline is returned in RAX.
	 *   - Non-zero RDX means the trampoline needs to enable 5-level
	 *     paging.
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	movq	%rsi, %rdi		/* real mode address */
	call	paging_prepare
	popq	%rsi

	/* Save the trampoline address in RCX */
	movq	%rax, %rcx
	/*
	 * Load the address of trampoline_return() into RDI.
	 * It will be used by the trampoline to return to the main code.
	 */
	leaq	trampoline_return(%rip), %rdi
	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
	pushq	$__KERNEL32_CS
	leaq	TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
	pushq	%rax
	lretq
trampoline_return:
	/* Restore the stack, the 32-bit trampoline uses its own stack */
	leaq	boot_stack_end(%rbx), %rsp
	/*
	 * cleanup_trampoline() would restore trampoline memory.
	 *
	 * RDI is the address of the page table to use instead of the page
	 * table in trampoline memory (if required).
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	leaq	top_pgtable(%rbx), %rdi
	call	cleanup_trampoline
	popq	%rsi
	/*
	 * Previously we've adjusted the GOT with the address the binary was
	 * loaded at. Now we need to re-adjust for the relocation address.
	 *
	 * Calculate the address the binary is loaded at, so that we can
	 * undo the previous GOT adjustment.
	 */
	call	1f
1:	popq	%rax
	subq	$1b, %rax

	/* The new adjustment is the relocation address */
	movq	%rbx, %rdi
	call	.Ladjust_got
	/*
	 * Copy the compressed kernel to the end of our buffer
	 * where decompression in place becomes safe.
	 */
	pushq	%rsi
	leaq	(_bss-8)(%rip), %rsi
	leaq	(_bss-8)(%rbx), %rdi
	movq	$_bss /* - $startup_32 */, %rcx
	shrq	$3, %rcx
	std
	rep	movsq
	cld
	popq	%rsi
	/*
	 * The GDT may get overwritten either during the copy we just did or
	 * during extract_kernel below. To avoid any issues, repoint the GDTR
	 * to the new copy of the GDT.
	 */
	leaq	gdt64(%rbx), %rax
	leaq	gdt(%rbx), %rdx
	movq	%rdx, 2(%rax)
	lgdt	(%rax)
	/*
	 * Jump to the relocated address.
	 */
	leaq	.Lrelocated(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)
#ifdef CONFIG_EFI_STUB
SYM_FUNC_START(efi64_stub_entry)
SYM_FUNC_START_ALIAS(efi_stub_entry)
	and	$~0xf, %rsp			/* realign the stack */
	movq	%rdx, %rbx			/* save boot_params pointer */
	call	efi_main
	movq	%rbx, %rsi
	leaq	startup_64(%rax), %rax
	jmp	*%rax
SYM_FUNC_END(efi64_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
#endif
	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq
/*
 * If running as an SEV guest, the encryption mask is required in the
 * page-table setup code below. When the guest also has SEV-ES enabled
 * set_sev_encryption_mask() will cause #VC exceptions, but the stage2
 * handler can't map its GHCB because the page-table is not set up yet.
 * So set up the encryption mask here while still on the stage1 #VC
 * handler. Then load stage2 IDT and switch to the kernel's own
 * page-table.
 */
	pushq	%rsi
	call	set_sev_encryption_mask
	call	load_stage2_idt
	call	initialize_identity_maps
	popq	%rsi
/*
 * Do the extraction, and jump to the new kernel.
 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
	leaq	input_data(%rip), %rdx	/* input_data */
	movl	$z_input_len, %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	movl	$z_output_len, %r9d	/* decompressed length, end of relocs */
	call	extract_kernel		/* returns kernel location in %rax */
	popq	%rsi
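	/*
	 * For reference, the register assignments above follow the SysV
	 * calling convention for the C-side entry point; roughly (a sketch,
	 * see misc.c for the authoritative prototype):
	 *
	 *	asmlinkage void *extract_kernel(void *rmode, memptr heap,
	 *					unsigned char *input_data,
	 *					unsigned long input_len,
	 *					unsigned char *output,
	 *					unsigned long output_len);
	 */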
/*
 * Jump to the decompressed kernel.
 */
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)
/*
 * Adjust the global offset table
 *
 * RAX is the previous adjustment of the table to undo (use 0 if it's the
 * first time we touch GOT).
 * RDI is the new adjustment to apply.
 */
.Ladjust_got:
	/* Walk through the GOT adding the address to the entries */
	leaq	_got(%rip), %rdx
	leaq	_egot(%rip), %rcx
1:	cmpq	%rcx, %rdx
	jae	2f
	subq	%rax, (%rdx)	/* Undo previous adjustment */
	addq	%rdi, (%rdx)	/* Apply the new adjustment */
	addq	$8, %rdx
	jmp	1b
2:	ret
	.code32
/*
 * This is the 32-bit trampoline that will be copied over to low memory.
 *
 * RDI contains the return address (might be above 4G).
 * ECX contains the base address of the trampoline memory.
 * Non-zero RDX means the trampoline needs to enable 5-level paging.
 */
SYM_CODE_START(trampoline_32bit_src)
	/* Set up data and stack segments */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %ss

	/* Set up new stack */
	leal	TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0
	/* Check what paging mode we want to be in after the trampoline */
	cmpl	$0, %edx
	jz	1f

	/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
	movl	%cr4, %eax
	testl	$X86_CR4_LA57, %eax
	jnz	3f
	jmp	2f

	/* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
1:
	movl	%cr4, %eax
	testl	$X86_CR4_LA57, %eax
	jz	3f

2:
	/* Point CR3 to the trampoline's new top level page table */
	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
	movl	%eax, %cr3

3:
	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
	pushl	%ecx
	pushl	%edx
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr
	popl	%edx
	popl	%ecx
	/* Enable PAE and LA57 (if required) paging modes */
	movl	$X86_CR4_PAE, %eax
	cmpl	$0, %edx
	jz	1f
	orl	$X86_CR4_LA57, %eax
1:	movl	%eax, %cr4
	/* Calculate address of .Lpaging_enabled once we are executing in the trampoline */
	leal	.Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
	/* Prepare the stack for far return to Long Mode */
	pushl	$__KERNEL_CS
	pushl	%eax

	/* Enable paging again */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movl	%eax, %cr0

	lret
SYM_CODE_END(trampoline_32bit_src)
	.code64
SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
	/* Return from the trampoline */
	jmp	*%rdi
SYM_FUNC_END(.Lpaging_enabled)
	/*
	 * The trampoline code has a size limit.
	 * Make sure we fail to compile if the trampoline code grows
	 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
	 */
	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
	.code32
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	/* This isn't an x86-64 CPU, so hang intentionally; we cannot continue */
1:	hlt
	jmp	1b
SYM_FUNC_END(.Lno_longmode)
#include "../../kernel/verify_cpu.S"
	.data
SYM_DATA_START_LOCAL(gdt64)
	.word	gdt_end - gdt - 1
	.quad	gdt - gdt64
SYM_DATA_END(gdt64)
	.balign	8
SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt - 1
	.long	0
	.word	0
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
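	/*
	 * A brief decode of the packed descriptors above, for reference:
	 * 0x00cf9a... is a flat ring-0 code segment with G=1 and D=1
	 * (32-bit), 0x00af9a... is the same but with L=1 (64-bit), and
	 * 0x00cf92... is a flat read/write data segment.
	 */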
SYM_DATA_START(boot_idt_desc)
	.word	boot_idt_end - boot_idt - 1
	.quad	0
SYM_DATA_END(boot_idt_desc)
	.balign 8
SYM_DATA_START(boot_idt)
	.rept	BOOT_IDT_ENTRIES
	.quad	0
	.quad	0
	.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)

#define ST32_boottime		60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol	88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base		32 // offsetof(efi_loaded_image_32_t, image_base)
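/*
 * A hedged cross-check of the hardcoded offsets above, assuming the usual
 * 32-bit EFI table layouts: the system and boot services tables start with
 * a 24-byte efi_table_hdr_t followed by 4-byte pointers/u32 fields in the
 * 32-bit ABI, which is how the 60, 88 and 32 byte offsets fall out.
 * offsetof() cannot be used directly because this is assembly, not C.
 */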
SYM_FUNC_START(efi32_pe_entry)
/*
 * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
 *			       efi_system_table_32_t *sys_table)
 */

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax				// dummy push to allocate loaded_image

	pushl	%ebx				// save callee-save registers
	pushl	%edi

	call	verify_cpu			// check for long mode support
	testl	%eax, %eax
	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
	jnz	2f

	call	1f
1:	pop	%ebx
	subl	$1b, %ebx
	/* Get the loaded image protocol pointer from the image handle */
	leal	-4(%ebp), %eax
	pushl	%eax				// &loaded_image
	leal	loaded_image_proto(%ebx), %eax
	pushl	%eax				// pass the GUID address
	pushl	8(%ebp)				// pass the image handle
	/*
	 * Note the alignment of the stack frame.
	 *   sys_table
	 *   handle			<-- 16-byte aligned on entry by ABI
	 *   return address
	 *   frame pointer
	 *   loaded_image		<-- local variable
	 *   saved %ebx			<-- 16-byte aligned here
	 *   saved %edi
	 *   &loaded_image
	 *   &loaded_image_proto
	 *   handle			<-- 16-byte aligned for call to handle_protocol
	 */
	movl	12(%ebp), %eax			// sys_table
	movl	ST32_boottime(%eax), %eax	// sys_table->boottime
	call	*BS32_handle_protocol(%eax)	// sys_table->boottime->handle_protocol
	addl	$12, %esp			// restore argument space
	testl	%eax, %eax
	jnz	2f
	movl	8(%ebp), %ecx			// image_handle
	movl	12(%ebp), %edx			// sys_table
	movl	-4(%ebp), %esi			// loaded_image
	movl	LI32_image_base(%esi), %esi	// loaded_image->image_base
	movl	%ebx, %ebp			// startup_32 for efi32_pe_stub_entry
	/*
	 * We need to set the image_offset variable here since startup_32() will
	 * use it before we get to the 64-bit efi_pe_entry() in C code.
	 */
	subl	%esi, %ebx			// calculate image_offset
	movl	%ebx, image_offset(%ebp)	// save image_offset
	jmp	efi32_pe_stub_entry
2:	popl	%edi				// restore callee-save registers
	popl	%ebx
	leave
	ret
SYM_FUNC_END(efi32_pe_entry)
	.section ".rodata"
	/* EFI loaded image protocol GUID */
	.balign 4
SYM_DATA_START_LOCAL(loaded_image_proto)
	.long	0x5b1b31a1
	.word	0x9562, 0x11d2
	.byte	0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
#endif
/*
 * Stack and heap for uncompression
 */
	.bss
	.balign 4
SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)

SYM_DATA_START_LOCAL(boot_stack)
	.fill BOOT_STACK_SIZE, 1, 0
	.balign 16
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","aw",@nobits
	.balign 4096
SYM_DATA_LOCAL(pgtable,		.fill BOOT_PGT_SIZE, 1, 0)

/*
 * The page table is going to be used instead of the page table in the
 * trampoline memory.
 */
SYM_DATA_LOCAL(top_pgtable,	.fill PAGE_SIZE, 1, 0)