/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>

#include "efi-header.S"
#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16			// "MZ" magic for EFI builds
	b	primary_entry			// branch to kernel start, magic
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	pe_header - _head		// Offset to the PE header.
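
/*
 * For reference, a minimal C sketch of the 64-byte Image header laid out
 * above (field names are illustrative, not kernel definitions; see
 * Documentation/arm64/booting.rst for the authoritative layout):
 *
 *	struct arm64_image_header {
 *		u32 code0;		// executable code ("MZ" magic / branch)
 *		u32 code1;		// executable code
 *		u64 text_offset;	// image load offset, little-endian
 *		u64 image_size;		// effective image size, little-endian
 *		u64 flags;		// informative flags, little-endian
 *		u64 res2, res3, res4;	// reserved
 *		u32 magic;		// ARM64_IMAGE_MAGIC ("ARM\x64")
 *		u32 res5;		// reserved (PE header offset for EFI)
 *	};
 */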
/*
 * The following callee-saved general purpose registers are used on the
 * primary low-level boot path:
 *
 *  Register   Scope                      Purpose
 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
 *  x28        __create_page_tables()                   callee preserved temp register
 *  x19/x20    __primary_switch()                       callee preserved temp registers
 *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
SYM_CODE_END(preserve_boot_args)
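
/*
 * A minimal C sketch of what the code above records (illustrative only,
 * not built; __inval_dcache_area is the helper tail-called above):
 *
 *	extern u64 boot_args[4];
 *
 *	void preserve_boot_args(u64 x0, u64 x1, u64 x2, u64 x3)
 *	{
 *		boot_args[0] = x0;	// FDT pointer (also kept in x21)
 *		boot_args[1] = x1;
 *		boot_args[2] = x2;
 *		boot_args[3] = x3;
 *		// written with the MMU off: invalidate so later cacheable
 *		// readers observe the values
 *		__inval_dcache_area(boot_args, sizeof(boot_args));
 *	}
 */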
/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
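
/*
 * Rough C equivalent of create_table_entry (illustrative only, not built;
 * phys_to_pte collapsed to plain arithmetic since the MMU is off here):
 *
 *	u64 *create_table_entry(u64 *tbl, u64 virt, unsigned int shift, u64 ptrs)
 *	{
 *		u64 *next = tbl + PAGE_SIZE / sizeof(u64);	// next table page
 *		u64 index = (virt >> shift) & (ptrs - 1);	// table index
 *
 *		tbl[index] = (u64)next | PMD_TYPE_TABLE;	// table descriptor
 *		return next;					// "tbl" advances
 *	}
 */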
/*
 * Macro to populate page table entries; these entries can be pointers to the
 * next level or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm
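
/*
 * Rough C equivalent of populate_entries (illustrative only, not built):
 *
 *	u64 populate_entries(u64 *tbl, u64 rtbl, u64 index, u64 eindex,
 *			     u64 flags, u64 inc)
 *	{
 *		for (; index <= eindex; index++, rtbl += inc)
 *			tbl[index] = rtbl | flags;	// next table or block PA
 *		return rtbl;
 *	}
 */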
/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *			  our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
					// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
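
/*
 * In C terms, the index computation above is roughly (illustrative only):
 *
 *	istart = (vstart >> shift) & (ptrs - 1);
 *	iend   = ((vend >> shift) & (ptrs - 1)) + count * ptrs;
 *				// 'count' extra tables at the previous level
 *				// mean this level spans count + 1 pages
 *	count  = iend - istart;	// extra tables needed at the next level
 */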
/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	start address to map
 *	vend:	end address to map - we map [vstart, vend]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 *	Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 *	Preserves:	vstart, vend, flags
 *	Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	add	\rtbl, \tbl, #PAGE_SIZE

	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic	\count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
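
/*
 * A minimal C-style sketch of the overall flow (illustrative only; the
 * elided register shuffling that advances \tbl between levels is omitted):
 *
 *	rtbl = tbl + PAGE_SIZE;			// next free table page
 *	count = 0;
 *	for each level from the PGD down to the level above the leaf:
 *		compute_indices(vstart, vend, level_shift, level_ptrs,
 *				&istart, &iend, &count);
 *		rtbl = populate_entries(tbl, rtbl, istart, iend,
 *					PMD_TYPE_TABLE, PAGE_SIZE);
 *		tbl = first page written for the next level;
 *	// leaf level: block mappings covering the physical range
 *	compute_indices(vstart, vend, SWAPPER_BLOCK_SHIFT, PTRS_PER_PTE,
 *			&istart, &iend, &count);
 *	populate_entries(tbl, phys & ~(SWAPPER_BLOCK_SIZE - 1), istart, iend,
 *			 flags, SWAPPER_BLOCK_SIZE);
 */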
/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
SYM_FUNC_START_LOCAL(__create_page_tables)
	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16

	mov	x7, SWAPPER_MM_MMUFLAGS
	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	adr_l	x6, vabits_actual
	dc	ivac, x6			// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	cmp	x5, TCR_T0SZ(VA_BITS)		// default T0SZ small enough?
	b.ge	1f				// .. then skip VA range extension
	dc	ivac, x6			// Invalidate potentially stale cache line

#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
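
/*
 * Worked example (assuming 4K pages, VA_BITS == 39 and the default
 * PHYS_MASK_SHIFT of 48): each level resolves PAGE_SHIFT - 3 = 9 bits, so
 * EXTRA_SHIFT = PGDIR_SHIFT + 9 = 30 + 9 = 39 and the extra top level has
 * EXTRA_PTRS = 1 << (48 - 39) = 512 entries, extending the ID map to cover
 * the full 48-bit physical range.
 */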
	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6

	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5

	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement

	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, idmap_pg_end
	bl	__inval_dcache_area

	bl	__inval_dcache_area

SYM_FUNC_END(__create_page_tables)
/*
 * The following fragment of code is executed with the MMU enabled.
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	msr	sp_el0, x5			// Save thread_info

#ifdef CONFIG_ARM64_PTR_AUTH
	__ptrauth_keys_init_cpu	x5, x6, x7, x8
#endif

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address

	stp	xzr, x30, [sp, #-16]!

#ifdef CONFIG_SHADOW_CALL_STACK
	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
#endif

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	adr_l	x0, __bss_start
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
SYM_FUNC_END(__primary_switched)
	.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
	.quad		_text - TEXT_OFFSET
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"
/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
SYM_FUNC_START(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	cmp	x0, #CurrentEL_EL2
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
	/* Hyp configuration. */
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	mov_q	x0, HCR_HOST_VHE_FLAGS

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cntvoff_el2, xzr		// Clear virtual offset
#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
	/* Populate ID registers. */
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:	csel	x3, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, 7f				// Skip if SPE not present

	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:	msr	mdcr_el2, x3			// Configure debug traps
	/* LORegions */
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	msr_s	SYS_LORC_EL1, xzr

	/* Stage-2 translation */
	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2

SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)

	/* Coprocessor traps. */
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors

	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
SYM_FUNC_END(el2_setup)
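
/*
 * A rough sketch of the decision flow above (illustrative pseudocode only,
 * with the register programming details elided):
 *
 *	u32 el2_setup(void)
 *	{
 *		if (current_el() != EL2) {
 *			sctlr_el1 = SCTLR_EL1_RES1 | ENDIAN_SET_EL1;
 *			return BOOT_CPU_MODE_EL1;
 *		}
 *		// at EL2: configure HCR, timers, GICv3 SRE, debug/SPE traps
 *		if (!vhe) {
 *			// install the hyp stub and drop to EL1h
 *			vbar_el2 = __hyp_stub_vectors;
 *			eret_to_el1();
 *		}
 *		return BOOT_CPU_MODE_EL2;
 *	}
 */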
/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
1:	str	w0, [x1]			// This CPU has booted in EL1
	dc	ivac, x1			// Invalidate potentially stale cache line
SYM_FUNC_END(set_cpu_boot_mode_flag)
/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
SYM_DATA_START(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
SYM_DATA_END(__boot_cpu_mode)
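
/*
 * How these two words are consumed later (a sketch along the lines of the
 * helpers in arch/arm64/include/asm/virt.h; see that header for the
 * authoritative definitions):
 *
 *	// word 0 starts as EL2, word 1 as EL1; set_cpu_boot_mode_flag()
 *	// overwrites word 0 for an EL1 boot and word 1 for an EL2 boot,
 *	// so after boot:
 *	//   both words == EL2	-> every CPU entered at EL2 (hyp available)
 *	//   words differ	-> CPUs booted at mixed exception levels
 */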
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
SYM_DATA_START(__early_cpu_boot_status)
	.quad	0
SYM_DATA_END(__early_cpu_boot_status)
/*
 * This provides a "holding pen" for platforms to hold all secondary
 * cores until we're ready for them to initialise.
 */
SYM_FUNC_START(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mov_q	x1, MPIDR_HWID_BITMASK
	adr_l	x3, secondary_holding_pen_release
	b.eq	secondary_startup
SYM_FUNC_END(secondary_holding_pen)
	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
SYM_FUNC_END(secondary_entry)
SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

SYM_FUNC_START_LOCAL(__secondary_switched)
	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */
	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm
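
/*
 * Roughly, in C (illustrative only):
 *
 *	__early_cpu_boot_status = status;	// non-cacheable store, MMU off
 *	dmb(sy);
 *	dc_ivac(&__early_cpu_boot_status);	// so a reader with the MMU on
 *						// (and caches on) sees the value
 */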
/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	msr	ttbr0_el1, x2			// load TTBR0
	msr	ttbr1_el1, x1			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
SYM_FUNC_END(__enable_mmu)
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
SYM_FUNC_END(__no_granule_support)
#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

	ldp	x12, x13, [x9], #24
	cmp	w13, #R_AARCH64_RELATIVE
	add	x14, x14, x23			// relocate
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
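
/*
 * A self-contained C sketch of the RELR decoding performed below
 * (illustrative only; 'delta' is the displacement to apply this pass):
 *
 *	static void apply_relr(const u64 *relr, const u64 *end, u64 delta)
 *	{
 *		u64 *where = NULL;
 *
 *		for (; relr < end; relr++) {
 *			u64 entry = *relr;
 *
 *			if (!(entry & 1)) {		// address entry
 *				where = (u64 *)entry;
 *				*where++ += delta;	// one relocation
 *			} else {			// bitmap entry
 *				for (int i = 0; i < 63; i++)
 *					if (entry & (2ULL << i))
 *						where[i] += delta;
 *				where += 63;		// next bitmap's base
 *			}
 *		}
 *	}
 */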
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table
	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

	tbnz	x11, #0, 3f			// branch to handle bitmaps
	ldr	x12, [x13]			// relocate address entry
	str	x12, [x13], #8			// adjust to start of bitmap

	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit

5:	add	x14, x14, #8			// move to next bit's address

	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)

SYM_FUNC_END(__relocate_kernel)
SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

#ifdef CONFIG_RELOCATABLE
	mov	x24, #0				// no RELR displacement yet

#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries

	msr	sctlr_el1, x19			// re-enable the MMU
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping

	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
SYM_FUNC_END(__primary_switch)
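
/*
 * A rough sketch of the CONFIG_RANDOMIZE_BASE flow implemented by
 * __primary_switch()/__primary_switched() (illustrative pseudocode only):
 *
 *	enable_mmu(init_pg_dir);
 *	relocate_kernel();		// apply RELA/RELR with offset 0
 *	primary_switched();		// may return with a KASLR offset in x23
 *	if (kaslr_offset) {
 *		disable_mmu();
 *		create_page_tables();	// rebuild the mapping at the new VA
 *		turn_mmu_back_on();	// SCTLR_EL1 value preserved in x19
 *		relocate_kernel();	// re-apply with the new displacement
 *		primary_switched();	// does not return this time
 *	}
 */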