/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>

#include "efi-header.S"

#define __PHYS_OFFSET	KERNEL_START

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent, so you call this at the
 * physical address the image was loaded at.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */

/*
 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
 */
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	primary_entry			// branch to kernel start, magic required
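	/*
	 * Note on the "MZ" trick above: "add x13, x18, #0x16" encodes to the
	 * little-endian word 0x91005a4d, so the first two bytes of the image
	 * are 0x4d 0x5a ("MZ"), the DOS/PE signature that UEFI firmware looks
	 * for, while the instruction itself merely writes a scratch register.
	 */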
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	pe_header - _head		// Offset to the PE header.
/*
 * The following callee-saved general purpose registers are used on the
 * primary low-level boot path:
 *
 *  Register   Scope                                      Purpose
 *  x21        primary_entry() .. start_kernel()          FDT pointer passed at boot in x0
 *  x23        primary_entry() .. start_kernel()          physical misalignment/KASLR offset
 *  x28        __create_page_tables()                     callee preserved temp register
 *  x19/x20    __primary_switch()                         callee preserved temp registers
 *  x24        __primary_switch() .. relocate_kernel()    current RELR displacement
 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code; see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
SYM_CODE_END(primary_entry)
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
SYM_CODE_END(preserve_boot_args)
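/*
 * The four values recorded above land in the boot_args[] array read later by
 * setup_arch(); a rough C view (illustrative only, assuming the conventional
 * definition in arch/arm64/kernel/setup.c):
 *
 *	extern u64 boot_args[4];	// x0..x3 as seen at kernel entry
 *	// setup_arch() warns if boot_args[1..3] were not zero, since the
 *	// arm64 boot protocol reserves x1-x3 (must be 0) for future use.
 */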
/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
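/*
 * Worked example (illustrative only; numbers assume 4K pages, VA_BITS == 39
 * and 48-bit physical addresses, so EXTRA_SHIFT == 39 and EXTRA_PTRS == 512):
 * "create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6" with x0 pointing at
 * idmap_pg_dir and x3 = __pa(__idmap_text_start) computes
 * index = (x3 >> 39) & 511, stores a PMD_TYPE_TABLE entry pointing at
 * idmap_pg_dir + PAGE_SIZE into that slot, and leaves x0 pointing at the
 * next table page.
 */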
/*
 * Macro to populate page table entries; these entries can be pointers to the
 * next level or last-level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags		// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc		// rtbl = pa next level
	add	\index, \index, #1
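/*
 * Roughly equivalent C for this macro (illustrative sketch only):
 *
 *	do {
 *		tbl[index] = phys_to_pte(rtbl) | flags;
 *		rtbl += inc;
 *	} while (index++ < eindex);	// writes slots [index, eindex] inclusive
 */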
/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is
 * assumed to be composed of multiple pages. (This effectively scales the end index.)
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in the previous level;
 *		this scales the end index.
 *		On exit: how many extra entries are required for the next page table level.
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart		// iend = (vend >> shift) & (ptrs - 1)
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart		// iend += (count - 1) * ptrs
						// our entries span multiple tables
	lsr	\istart, \vstart, \shift
	sub	\count, \count, #1
	and	\istart, \istart, \count
	sub	\count, \iend, \istart
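/*
 * Worked example (illustrative; assumes shift == 21 and ptrs == 512, i.e.
 * 2 MiB entries, with count == 0 on entry): for vstart = 0x40200000 and
 * vend = 0x4083ffff, istart = (vstart >> 21) & 511 = 1 and
 * iend = (vend >> 21) & 511 = 4, so entries 1..4 are populated and
 * count = 3 on exit, i.e. three extra entries, telling the next level that
 * it will be built from four table pages.
 */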
/*
 * Map memory for the specified virtual address range. Each level of page table
 * needed supports multiple entries. If a level requires n entries the next page
 * table level is assumed to be formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	start address to map
 *	vend:	end address to map - we map [vstart, vend]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, vend, flags
 * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	add	\rtbl, \tbl, #PAGE_SIZE
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic	\count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
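/*
 * Rough C-like shape of map_memory (illustrative sketch only; the real macro
 * works entirely in registers and is trimmed by SWAPPER_PGTABLE_LEVELS):
 *
 *	rtbl = tbl + PAGE_SIZE;
 *	count = 0;
 *	compute_indices(vstart, vend, PGDIR_SHIFT, pgds, &istart, &iend, &count);
 *	populate_entries(tbl, rtbl, istart, iend, PMD_TYPE_TABLE, PAGE_SIZE);
 *	tbl = next_table;		// descend; repeat for the PUD/PMD levels
 *	...
 *	compute_indices(vstart, vend, SWAPPER_BLOCK_SHIFT, PTRS_PER_PTE,
 *			&istart, &iend, &count);
 *	populate_entries(tbl, phys & ~(SWAPPER_BLOCK_SIZE - 1), istart, iend,
 *			 flags, SWAPPER_BLOCK_SIZE);
 */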
/*
 * Set up the initial page tables. We only set up the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
SYM_FUNC_START_LOCAL(__create_page_tables)
	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)

	adr_l	x6, vabits_actual
	dc	ivac, x6			// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	cmp	x5, TCR_T0SZ(VA_BITS)		// default T0SZ small enough?
	b.ge	1f				// .. then skip VA range extension

	dc	ivac, x6			// Invalidate potentially stale cache line
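	/*
	 * Worked example (illustrative; assumes 4K pages and VA_BITS == 39):
	 * TCR_T0SZ(VA_BITS) = 64 - 39 = 25. If __idmap_text_end sits at a
	 * 40-bit physical address, its leading-zero count is 24, which is
	 * below 25, so the branch above falls through and the ID map gets an
	 * extended VA range with an extra translation level configured below.
	 */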
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"

	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6

	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5

	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement

	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, idmap_pg_end
	bl	__inval_dcache_area

	bl	__inval_dcache_area
SYM_FUNC_END(__create_page_tables)
/*
 * The following fragment of code is executed with the MMU enabled.
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	msr	sp_el0, x5			// Save thread_info

#ifdef CONFIG_ARM64_PTR_AUTH
	__ptrauth_keys_init_cpu	x5, x6, x7, x8

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address

	stp	xzr, x30, [sp, #-16]!

#ifdef CONFIG_SHADOW_CALL_STACK
	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings
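	/*
	 * kimage_voffset is the difference between the kernel image's virtual
	 * and physical addresses; e.g. (assuming the helpers in
	 * arch/arm64/include/asm/memory.h) __pa_symbol(sym) can later be
	 * computed as sym - kimage_voffset.
	 */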
	adr_l	x0, __bss_start

	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
SYM_FUNC_END(__primary_switched)

	.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
SYM_FUNC_START(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	cmp	x0, #CurrentEL_EL2
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4

	/* Hyp configuration. */
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	mov_q	x0, HCR_HOST_VHE_FLAGS
	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	orr	x0, x0, #3			// Enable EL1 physical timers

	msr	cntvoff_el2, xzr		// Clear virtual offset
#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
	/* Populate ID registers. */
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2

	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
	csel	x3, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, 7f				// Skip if SPE not present

	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter

	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1

	msr	mdcr_el2, x3			// Configure debug traps

	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, #4
	msr_s	SYS_LORC_EL1, xzr

	/* Stage-2 translation */
	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)

	/* Coprocessor traps. */
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors

	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
SYM_FUNC_END(el2_setup)
/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
1:	str	w0, [x1]			// This CPU has booted in EL1
	dc	ivac, x1			// Invalidate potentially stale cache line
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
SYM_DATA_START(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
SYM_DATA_END(__boot_cpu_mode)
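/*
 * Illustrative C sketch of how these two words are consumed once the kernel
 * is up (see arch/arm64/include/asm/virt.h; the exact helper may differ by
 * kernel version):
 *
 *	static inline bool is_hyp_mode_available(void)
 *	{
 *		return __boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
 *		       __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2;
 *	}
 */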
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
SYM_DATA_START(__early_cpu_boot_status)
SYM_DATA_END(__early_cpu_boot_status)

/*
 * This provides a "holding pen" for platforms to hold all secondary
 * cores until we're ready for them to initialise.
 */
SYM_FUNC_START(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mov_q	x1, MPIDR_HWID_BITMASK
	adr_l	x3, secondary_holding_pen_release
	b.eq	secondary_startup
SYM_FUNC_END(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
SYM_FUNC_START(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	ldr	x8, =__secondary_switched
SYM_FUNC_END(secondary_startup)
SYM_FUNC_START_LOCAL(__secondary_switched)
	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow

	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5

	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */
	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	adr_l	\tmp1, __early_cpu_boot_status
	dc	ivac, \tmp1			// Invalidate potentially stale cache line

/*
 * x0  = SCTLR_EL1 value for turning on the MMU.
 * x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x2, id_aa64mmfr0_el1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, #4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	msr	ttbr0_el1, x2			// load TTBR0
	msr	ttbr1_el1, x1			// load TTBR1

	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
SYM_FUNC_END(__enable_mmu)
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
SYM_FUNC_END(__no_granule_support)
#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

	ldp	x12, x13, [x9], #24
	cmp	w13, #R_AARCH64_RELATIVE
	add	x14, x14, x23			// relocate
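	/*
	 * Roughly equivalent C for the RELA pass (illustrative sketch only),
	 * where 'offset' is the displacement held in x23:
	 *
	 *	for (Elf64_Rela *r = rela; r < rela_end; r++) {
	 *		if (ELF64_R_TYPE(r->r_info) != R_AARCH64_RELATIVE)
	 *			continue;
	 *		*(u64 *)(r->r_offset + offset) = r->r_addend + offset;
	 *	}
	 */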
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13, and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
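	/*
	 * Roughly equivalent C for the decode loop below (illustrative sketch
	 * only; assumes a first pass, i.e. the previously applied displacement
	 * in x24 is zero, so 'offset' is both the amount added to each word
	 * and the way each word is located):
	 *
	 *	u64 *where = NULL;
	 *	for (u64 *p = relr; p < relr_end; p++) {
	 *		u64 entry = *p;
	 *		if (!(entry & 1)) {			// address entry
	 *			where = (u64 *)(entry + offset);
	 *			*where++ += offset;
	 *		} else {				// bitmap entry
	 *			for (int i = 0; i < 63; i++)
	 *				if (entry & (1ULL << (i + 1)))
	 *					where[i] += offset;
	 *			where += 63;
	 *		}
	 *	}
	 */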
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table
	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

	tbnz	x11, #0, 3f			// branch to handle bitmaps
	ldr	x12, [x13]			// relocate address entry
	str	x12, [x13], #8			// adjust to start of bitmap

	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit

5:	add	x14, x14, #8			// move to next bit's address

	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)

SYM_FUNC_END(__relocate_kernel)
SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value

#ifdef CONFIG_RELOCATABLE
	mov	x24, #0				// no RELR displacement yet

#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries

	msr	sctlr_el1, x19			// re-enable the MMU

	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping

	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
SYM_FUNC_END(__primary_switch)