1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * linux/arch/arm/boot/compressed/head.S
5 * Copyright (C) 1996-2002 Russell King
6 * Copyright (C) 2004 Hyok S. Choi (MPU support)
8 #include <linux/linkage.h>
9 #include <asm/assembler.h>
12 #include "efi-header.S"
14 AR_CLASS( .arch armv7-a )
15 M_CLASS( .arch armv7-m )
20 * Note that these macros must not contain any code which is not
21 * 100% relocatable. Any attempt to do so will result in a crash.
22 * Please select one of the following when turning on debugging.
26 #if defined(CONFIG_DEBUG_ICEDCC)
28 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
29 .macro loadsp, rb, tmp1, tmp2
31 .macro writeb, ch, rb, tmp
32 mcr p14, 0, \ch, c0, c5, 0
34 #elif defined(CONFIG_CPU_XSCALE)
35 .macro loadsp, rb, tmp1, tmp2
37 .macro writeb, ch, rb, tmp
38 mcr p14, 0, \ch, c8, c0, 0
41 .macro loadsp, rb, tmp1, tmp2
43 .macro writeb, ch, rb, tmp
44 mcr p14, 0, \ch, c1, c0, 0
50 #include CONFIG_DEBUG_LL_INCLUDE
52 .macro writeb, ch, rb, tmp
53 #ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
56 waituarttxrdy \tmp, \rb
61 #if defined(CONFIG_ARCH_SA1100)
62 .macro loadsp, rb, tmp1, tmp2
63 mov \rb, #0x80000000 @ physical base address
64 #ifdef CONFIG_DEBUG_LL_SER3
65 add \rb, \rb, #0x00050000 @ Ser3
67 add \rb, \rb, #0x00010000 @ Ser1
71 .macro loadsp, rb, tmp1, tmp2
72 addruart \rb, \tmp1, \tmp2
90 * Debug kernel copy by printing the memory addresses involved
92 .macro dbgkc, begin, end, cbegin, cend
98 kphex \begin, 8 /* Start of compressed kernel */
102 kphex \end, 8 /* End of compressed kernel */
107 kphex \cbegin, 8 /* Start of kernel copy */
111 kphex \cend, 8 /* End of kernel copy */
117 * Debug print of the final appended DTB location
119 .macro dbgadtb, begin, end
127 kphex \begin, 8 /* Start of appended DTB */
132 kphex \end, 8 /* End of appended DTB */
138 .macro enable_cp15_barriers, reg
139 mrc p15, 0, \reg, c1, c0, 0 @ read SCTLR
140 tst \reg, #(1 << 5) @ CP15BEN bit set?
142 orr \reg, \reg, #(1 << 5) @ CP15 barrier instructions
143 mcr p15, 0, \reg, c1, c0, 0 @ write SCTLR
144 ARM( .inst 0xf57ff06f @ v7+ isb )
150 * The kernel build system appends the size of the
151 * decompressed kernel at the end of the compressed data
152 * in little-endian form.
154 .macro get_inflated_image_size, res:req, tmp1:req, tmp2:req
155 adr \res, .Linflated_image_size_offset
157 add \tmp1, \tmp1, \res @ address of inflated image size
159 ldrb \res, [\tmp1] @ get_unaligned_le32
160 ldrb \tmp2, [\tmp1, #1]
161 orr \res, \res, \tmp2, lsl #8
162 ldrb \tmp2, [\tmp1, #2]
163 ldrb \tmp1, [\tmp1, #3]
164 orr \res, \res, \tmp2, lsl #16
165 orr \res, \res, \tmp1, lsl #24
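/*
 * A rough C equivalent of the byte-wise load above, for illustration
 * (mirroring the kernel's get_unaligned_le32() helper):
 *
 *	u32 size = p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
 *
 * Byte loads are used because the word is not guaranteed to be
 * 32-bit aligned and alignment faults may still be enabled.
 */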
168 .section ".start", "ax"
170 * sort out different calling conventions
174 * Always enter in ARM state for CPUs that support the ARM ISA.
 * As of today (2014) that's exactly the members of the A and R classes.
180 .type start,#function
182 * These 7 nops along with the 1 nop immediately below for
183 * !THUMB2 form 8 nops that make the compressed kernel bootable
184 * on legacy ARM systems that were assuming the kernel in a.out
185 * binary format. The boot loaders on these systems would
186 * jump 32 bytes into the image to skip the a.out header.
 * With these 8 nops filling exactly 32 bytes, things still
188 * work as expected on these legacy systems. Thumb2 mode keeps
189 * 7 of the nops as it turns out that some boot loaders
 * were patching the initial instructions of the kernel, i.e.
191 * had started to exploit this "patch area".
196 #ifndef CONFIG_THUMB2_KERNEL
199 AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
200 M_CLASS( nop.w ) @ M: already in Thumb2 mode
205 .word _magic_sig @ Magic numbers to help the loader
206 .word _magic_start @ absolute load/run zImage address
207 .word _magic_end @ zImage end address
208 .word 0x04030201 @ endianness flag
209 .word 0x45454545 @ another magic number to indicate
210 .word _magic_table @ additional data table
214 ARM_BE8( setend be ) @ go BE8 if compiled for BE8
215 AR_CLASS( mrs r9, cpsr )
216 #ifdef CONFIG_ARM_VIRT_EXT
217 bl __hyp_stub_install @ get into SVC mode, reversibly
219 mov r7, r1 @ save architecture ID
220 mov r8, r2 @ save atags pointer
222 #ifndef CONFIG_CPU_V7M
224 * Booting from Angel - need to enter SVC mode and disable
225 * FIQs/IRQs (numeric definitions from angel arm.h source).
226 * We only do this if we were in user mode on entry.
228 mrs r2, cpsr @ get current mode
229 tst r2, #3 @ not user?
231 mov r0, #0x17 @ angel_SWIreason_EnterSVC
232 ARM( swi 0x123456 ) @ angel_SWI_ARM
233 THUMB( svc 0xab ) @ angel_SWI_THUMB
235 safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in SPSR
240 * Note that some cache flushing and other stuff may
241 * be needed here - is there an Angel SWI call for this?
245 * some architecture specific code can be inserted
246 * by the linker here, but it should preserve r7, r8, and r9.
251 #ifdef CONFIG_AUTO_ZRELADDR
253 * Find the start of physical memory. As we are executing
254 * without the MMU on, we are in the physical address space.
 * We just need to get rid of any offset by aligning the address.
258 * This alignment is a balance between the requirements of
259 * different platforms - we have chosen 128MB to allow
260 * platforms which align the start of their physical memory
261 * to 128MB to use this feature, while allowing the zImage
262 * to be placed within the first 128MB of memory on other
263 * platforms. Increasing the alignment means we place
264 * stricter alignment requirements on the start of physical
265 * memory, but relaxing it means that we break people who
 * are already placing their zImage in (e.g.) the top 64MB
270 and r4, r4, #0xf8000000
271 /* Determine final kernel image address. */
272 add r4, r4, #TEXT_OFFSET
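/*
 * In C terms the two instructions above amount to (a sketch only,
 * using the 128MB mask discussed in the comment above):
 *
 *	kernel_addr = (pc & 0xf8000000) + TEXT_OFFSET;
 */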
 * Set up a page table only if it won't overwrite ourselves.
279 * That means r4 < pc || r4 - 16k page directory > &_end.
 * Given that r4 > &_end is most infrequent, we add a rough
281 * additional 1MB of room for a possible appended DTB.
288 orrcc r4, r4, #1 @ remember we skipped cache_on
297 get_inflated_image_size r9, r10, lr
299 #ifndef CONFIG_ZBOOT_ROM
300 /* malloc space is above the relocated stack (64k max) */
301 add r10, sp, #MALLOC_SIZE
 * With ZBOOT_ROM the bss/stack is non-relocatable,
305 * but someone could still run this code from RAM,
306 * in which case our reference is _edata.
311 mov r5, #0 @ init dtb size to 0
312 #ifdef CONFIG_ARM_APPENDED_DTB
314 * r4 = final kernel address (possibly with LSB set)
315 * r5 = appended dtb size (still unknown)
317 * r7 = architecture ID
318 * r8 = atags/device tree pointer
319 * r9 = size of decompressed image
320 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * If there are device trees (dtb) appended to zImage, advance r10 so that the
324 * dtb data will get relocated along with the kernel if necessary.
329 ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
334 bne dtb_check_done @ not found
336 #ifdef CONFIG_ARM_ATAG_DTB_COMPAT
338 * OK... Let's do some funky business here.
339 * If we do have a DTB appended to zImage, and we do have
 * an ATAG list around, we want the latter to be translated
341 * and folded into the former here. No GOT fixup has occurred
 * yet, but none of the code we're about to call uses any global variables.
346 /* Get the initial DTB size */
349 /* convert to little endian */
350 eor r1, r5, r5, ror #16
351 bic r1, r1, #0x00ff0000
353 eor r5, r5, r1, lsr #8
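/*
 * This eor/bic/eor sequence (together with a ror) is the classic
 * pre-ARMv6 32-bit byte swap; v6+ could simply use "rev". Roughly:
 *
 *	t = x ^ ror32(x, 16);
 *	t &= ~0x00ff0000;
 *	x = ror32(x, 8) ^ (t >> 8);
 */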
356 /* 50% DTB growth should be good enough */
357 add r5, r5, r5, lsr #1
358 /* preserve 64-bit alignment */
361 /* clamp to 32KB min and 1MB max */
366 /* temporarily relocate the stack past the DTB work space */
375 * If returned value is 1, there is no ATAG at the location
 * pointed to by r8. Try the typical 0x100 offset from the start
377 * of RAM and hope for the best.
380 sub r0, r4, #TEXT_OFFSET
390 mov r8, r6 @ use the appended device tree
393 * Make sure that the DTB doesn't end up in the final
394 * kernel's .bss area. To do so, we adjust the decompressed
395 * kernel size to compensate if that .bss size is larger
396 * than the relocated code.
398 ldr r5, =_kernel_bss_size
399 adr r1, wont_overwrite
404 /* Get the current DTB size */
407 /* convert r5 (dtb size) to little endian */
408 eor r1, r5, r5, ror #16
409 bic r1, r1, #0x00ff0000
411 eor r5, r5, r1, lsr #8
414 /* preserve 64-bit alignment */
418 /* relocate some pointers past the appended dtb */
426 * Check to see if we will overwrite ourselves.
427 * r4 = final kernel address (possibly with LSB set)
428 * r9 = size of decompressed image
429 * r10 = end of this image, including bss/stack/malloc space if non XIP
431 * r4 - 16k page directory >= r10 -> OK
432 * r4 + image length <= address of wont_overwrite -> OK
433 * Note: the possible LSB in r4 is harmless here.
439 adr r9, wont_overwrite
444 * Relocate ourselves past the end of the decompressed kernel.
446 * r10 = end of the decompressed kernel
447 * Because we always copy ahead, we need to do it from the end and go
448 * backward in case the source and destination overlap.
451 * Bump to the next 256-byte boundary with the size of
452 * the relocation code added. This avoids overwriting
 * ourselves when the offset is small.
455 add r10, r10, #((reloc_code_end - restart + 256) & ~255)
458 /* Get start of code we want to copy and align it down. */
462 /* Relocate the hyp vector base if necessary */
463 #ifdef CONFIG_ARM_VIRT_EXT
465 and r0, r0, #MODE_MASK
470 * Compute the address of the hyp vectors after relocation.
471 * This requires some arithmetic since we cannot directly
472 * reference __hyp_stub_vectors in a PC-relative way.
473 * Call __hyp_set_vectors with the new address so that we
474 * can HVC again after the copy.
477 movw r1, #:lower16:__hyp_stub_vectors - 0b
478 movt r1, #:upper16:__hyp_stub_vectors - 0b
486 sub r9, r6, r5 @ size to copy
487 add r9, r9, #31 @ rounded up to a multiple
488 bic r9, r9, #31 @ ... of 32 bytes
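/*
 * The add/bic pair is the usual round-up idiom, i.e. in C:
 *
 *	size = (size + 31) & ~31;
 *
 * so the copy loop below always moves whole 32-byte blocks.
 */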
496 * We are about to copy the kernel to a new memory area.
497 * The boundaries of the new memory area can be found in
498 * r10 and r9, whilst r5 and r6 contain the boundaries
499 * of the memory we are going to copy.
 * Calling dbgkc will help with the printing of this information.
503 dbgkc r5, r6, r10, r9
506 1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
508 stmdb r9!, {r0 - r3, r10 - r12, lr}
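/*
 * Each ldmdb/stmdb pair above moves eight registers, i.e. 32 bytes,
 * per iteration, walking down from the end of both regions. Copying
 * backwards is what makes the overlapping case safe, much like a C
 * memmove() with dest > src (copy_32_bytes() is a stand-in name):
 *
 *	while (n) {
 *		n -= 32;
 *		copy_32_bytes(dst + n, src + n);
 *	}
 */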
511 /* Preserve offset to relocated code. */
514 mov r0, r9 @ start of relocated zImage
515 add r1, sp, r6 @ end of relocated zImage
524 ldmia r0, {r1, r2, r3, r11, r12}
525 sub r0, r0, r1 @ calculate the delta offset
528 * If delta is zero, we are running at the address we were linked at.
532 * r4 = kernel execution address (possibly with LSB set)
533 * r5 = appended dtb size (0 if not present)
534 * r7 = architecture ID
546 #ifndef CONFIG_ZBOOT_ROM
 * If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM=n),
549 * we need to fix up pointers into the BSS region.
550 * Note that the stack pointer has already been fixed up.
556 * Relocate all entries in the GOT table.
557 * Bump bss entries to _edata + dtb size
559 1: ldr r1, [r11, #0] @ relocate entries in the GOT
560 add r1, r1, r0 @ This fixes up C references
561 cmp r1, r2 @ if entry >= bss_start &&
562 cmphs r3, r1 @ bss_end > entry
563 addhi r1, r1, r5 @ entry += dtb size
564 str r1, [r11], #4 @ next entry
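/*
 * Sketch of the loop above in C, with got/bss_start/bss_end/dtb_size
 * standing in for r11/r2/r3/r5 (illustrative only):
 *
 *	for (; got < got_end; got++) {
 *		*got += delta;
 *		if (*got >= bss_start && *got < bss_end)
 *			*got += dtb_size;
 *	}
 */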
568 /* bump our bss pointers too */
575 * Relocate entries in the GOT table. We only relocate
576 * the entries that are outside the (relocated) BSS region.
578 1: ldr r1, [r11, #0] @ relocate entries in the GOT
579 cmp r1, r2 @ entry < bss_start ||
580 cmphs r3, r1 @ _end < entry
581 addlo r1, r1, r0 @ table. This fixes up the
582 str r1, [r11], #4 @ C references.
587 not_relocated: mov r0, #0
588 1: str r0, [r2], #4 @ clear bss
596 * Did we skip the cache setup earlier?
597 * That is indicated by the LSB in r4.
 * The C runtime environment should now be set up sufficiently.
606 * Set up some pointers, and start decompressing.
607 * r4 = kernel execution address
608 * r7 = architecture ID
612 mov r1, sp @ malloc space above stack
613 add r2, sp, #MALLOC_SIZE @ 64k max
617 get_inflated_image_size r1, r2, r3
619 mov r0, r4 @ start of inflated image
620 add r1, r1, r0 @ end of inflated image
624 #ifdef CONFIG_ARM_VIRT_EXT
625 mrs r0, spsr @ Get saved CPU boot mode
626 and r0, r0, #MODE_MASK
627 cmp r0, #HYP_MODE @ if not booted in HYP mode...
628 bne __enter_kernel @ boot kernel directly
630 adr r12, .L__hyp_reentry_vectors_offset
635 __HVC(0) @ otherwise bounce to hyp mode
637 b . @ should never be reached
640 .L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
648 .word __bss_start @ r2
650 .word _got_start @ r11
655 LC1: .word .L_user_stack_end - LC1 @ sp
656 .word _edata - LC1 @ r6
660 .word _end - restart + 16384 + 1024*1024
662 .Linflated_image_size_offset:
663 .long (input_data_end - 4) - .
665 #ifdef CONFIG_ARCH_RPC
667 params: ldr r0, =0x10000100 @ params_phys for RPC
674 * dcache_line_size - get the minimum D-cache line size from the CTR register
677 .macro dcache_line_size, reg, tmp
678 #ifdef CONFIG_CPU_V7M
679 movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
680 movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
683 mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
686 and \tmp, \tmp, #0xf @ cache line size encoding
687 mov \reg, #4 @ bytes per word
688 mov \reg, \reg, lsl \tmp @ actual cache line size
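/*
 * CTR[19:16] (DminLine) holds log2 of the smallest D-cache line
 * size in words, so the macro computes, in C terms:
 *
 *	line_size = 4 << ((ctr >> 16) & 0xf);
 */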
 * Turn on the cache. We need to set up some page tables so that we
693 * can have both the I and D caches on.
695 * We place the page tables 16k down from the kernel execution address,
696 * and we hope that nothing else is using it. If we're using it, we
700 * r4 = kernel execution address
701 * r7 = architecture number
704 * r0, r1, r2, r3, r9, r10, r12 corrupted
705 * This routine must preserve:
709 cache_on: mov r3, #8 @ cache_on function
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
716 __armv4_mpu_cache_on:
717 mov r0, #0x3f @ 4G, the whole
718 mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
719 mcr p15, 0, r0, c6, c7, 1
722 mcr p15, 0, r0, c2, c0, 0 @ D-cache on
723 mcr p15, 0, r0, c2, c0, 1 @ I-cache on
724 mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
727 mcr p15, 0, r0, c5, c0, 1 @ I-access permission
728 mcr p15, 0, r0, c5, c0, 0 @ D-access permission
731 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
732 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
733 mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
734 mrc p15, 0, r0, c1, c0, 0 @ read control reg
735 @ ...I .... ..D. WC.M
736 orr r0, r0, #0x002d @ .... .... ..1. 11.1
737 orr r0, r0, #0x1000 @ ...1 .... .... ....
739 mcr p15, 0, r0, c1, c0, 0 @ write control reg
742 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
743 mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
746 __armv3_mpu_cache_on:
747 mov r0, #0x3f @ 4G, the whole
748 mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
751 mcr p15, 0, r0, c2, c0, 0 @ cache on
752 mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
755 mcr p15, 0, r0, c5, c0, 0 @ access permission
758 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
760 * ?? ARMv3 MMU does not allow reading the control register,
761 * does this really work on ARMv3 MPU?
763 mrc p15, 0, r0, c1, c0, 0 @ read control reg
764 @ .... .... .... WC.M
765 orr r0, r0, #0x000d @ .... .... .... 11.1
766 /* ?? this overwrites the value constructed above? */
768 mcr p15, 0, r0, c1, c0, 0 @ write control reg
770 /* ?? invalidate for the second time? */
771 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
774 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
780 __setup_mmu: sub r3, r4, #16384 @ Page directory size
781 bic r3, r3, #0xff @ Align the pointer
784 * Initialise the page tables, turning on the cacheable and bufferable
785 * bits for the RAM area only.
789 mov r9, r9, lsl #18 @ start of RAM
790 add r10, r9, #0x10000000 @ a reasonable RAM size
791 mov r1, #0x12 @ XN|U + section mapping
792 orr r1, r1, #3 << 10 @ AP=11
794 1: cmp r1, r9 @ if virt > start of RAM
795 cmphs r10, r1 @ && end of RAM > virt
796 bic r1, r1, #0x1c @ clear XN|U + C + B
797 orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
798 orrhs r1, r1, r6 @ set RAM section settings
799 str r1, [r0], #4 @ 1:1 mapping
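/*
 * Loosely, the loop above does the following, with SECTION/AP/C/B/XN
 * standing in for the short-descriptor section bits built in r1/r6
 * (a sketch, not the literal register usage):
 *
 *	for (i = 0; i < 4096; i++) {
 *		u32 desc = (i << 20) | AP | SECTION;
 *		desc |= in_ram(i << 20) ? (C | B) : XN;
 *		pgd[i] = desc;
 *	}
 */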
804 * If ever we are running from Flash, then we surely want the cache
805 * to be enabled also for our execution instance... We map 2MB of it
 * so there is no map overlap problem for compressed kernels up to 1 MB.
807 * If the execution is in RAM then we would only be duplicating the above.
809 orr r1, r6, #0x04 @ ensure B is set for this
813 orr r1, r1, r2, lsl #20
814 add r0, r3, r2, lsl #2
821 @ Enable unaligned access on v6, to allow better code generation
822 @ for the decompressor C code:
823 __armv6_mmu_cache_on:
824 mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
825 bic r0, r0, #2 @ A (no unaligned access fault)
826 orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
827 mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
828 b __armv4_mmu_cache_on
830 __arm926ejs_mmu_cache_on:
831 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
832 mov r0, #4 @ put dcache in WT mode
833 mcr p15, 7, r0, c15, c0, 0
836 __armv4_mmu_cache_on:
839 mov r6, #CB_BITS | 0x12 @ U
842 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
843 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
844 mrc p15, 0, r0, c1, c0, 0 @ read control reg
845 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
847 ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
848 bl __common_mmu_cache_on
850 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
854 __armv7_mmu_cache_on:
855 enable_cp15_barriers r11
858 mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
860 movne r6, #CB_BITS | 0x02 @ !XN
863 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
865 mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
867 mrc p15, 0, r0, c1, c0, 0 @ read control reg
868 bic r0, r0, #1 << 28 @ clear SCTLR.TRE
869 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
870 orr r0, r0, #0x003c @ write buffer
871 bic r0, r0, #2 @ A (no unaligned access fault)
872 orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
873 @ (needed for ARM1176)
875 ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
876 mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
877 orrne r0, r0, #1 @ MMU enabled
878 movne r1, #0xfffffffd @ domain 0 = client
879 bic r6, r6, #1 << 31 @ 32-bit translation system
880 bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0
881 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
882 mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
883 mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
885 mcr p15, 0, r0, c7, c5, 4 @ ISB
886 mcr p15, 0, r0, c1, c0, 0 @ load control register
887 mrc p15, 0, r0, c1, c0, 0 @ and read it back
889 mcr p15, 0, r0, c7, c5, 4 @ ISB
894 mov r6, #CB_BITS | 0x12 @ U
897 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
898 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
899 mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
900 mrc p15, 0, r0, c1, c0, 0 @ read control reg
901 orr r0, r0, #0x1000 @ I-cache enable
902 bl __common_mmu_cache_on
904 mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
907 __common_mmu_cache_on:
908 #ifndef CONFIG_THUMB2_KERNEL
910 orr r0, r0, #0x000d @ Write buffer, mmu
913 mcr p15, 0, r3, c2, c0, 0 @ load page table pointer
914 mcr p15, 0, r1, c3, c0, 0 @ load domain access control
916 .align 5 @ cache line aligned
917 1: mcr p15, 0, r0, c1, c0, 0 @ load control register
918 mrc p15, 0, r0, c1, c0, 0 @ and read it back to
919 sub pc, lr, r0, lsr #32 @ properly flush pipeline
922 #define PROC_ENTRY_SIZE (4*5)
925 * Here follow the relocatable cache support functions for the
926 * various processors. This is a generic hook for locating an
927 * entry and jumping to an instruction at the specified offset
928 * from the start of the block. Please note this is all position
938 call_cache_fn: adr r12, proc_types
939 #ifdef CONFIG_CPU_CP15
940 mrc p15, 0, r9, c0, c0 @ get processor ID
941 #elif defined(CONFIG_CPU_V7M)
943 * On v7-M the processor id is located in the V7M_SCB_CPUID
944 * register, but as cache handling is IMPLEMENTATION DEFINED on
 * v7-M (if existent at all) we just return early here.
946 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
947 * __armv7_mmu_cache_{on,off,flush}) would be selected which
948 * use cp15 registers that are not implemented on v7-M.
952 ldr r9, =CONFIG_PROCESSOR_ID
954 1: ldr r1, [r12, #0] @ get value
955 ldr r2, [r12, #4] @ get mask
956 eor r1, r1, r9 @ (real ^ match)
958 ARM( addeq pc, r12, r3 ) @ call cache function
959 THUMB( addeq r12, r3 )
960 THUMB( moveq pc, r12 ) @ call cache function
961 add r12, r12, #PROC_ENTRY_SIZE
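/*
 * In effect the lookup above is (entry layout per the proc_types
 * comment below; names illustrative):
 *
 *	struct proc_type { u32 value, mask; u32 on, off, flush; };
 *	for (p = proc_types; ; p++)
 *		if (((id ^ p->value) & p->mask) == 0)
 *			break;
 *
 * and then branches to the on/off/flush slot of the matching entry,
 * selected by the byte offset passed in r3.
 */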
965 * Table for cache operations. This is basically:
968 * - 'cache on' method instruction
969 * - 'cache off' method instruction
970 * - 'cache flush' method instruction
972 * We match an entry using: ((real_id ^ match) & mask) == 0
974 * Writethrough caches generally only need 'on' and 'off'
 * methods. Writeback caches _must_ have the flush method defined.
979 .type proc_types,#object
981 .word 0x41000000 @ old ARM ID
990 .word 0x41007000 @ ARM7/710
999 .word 0x41807200 @ ARM720T (writethrough)
1001 W(b) __armv4_mmu_cache_on
1002 W(b) __armv4_mmu_cache_off
1006 .word 0x41007400 @ ARM74x
1008 W(b) __armv3_mpu_cache_on
1009 W(b) __armv3_mpu_cache_off
1010 W(b) __armv3_mpu_cache_flush
1012 .word 0x41009400 @ ARM94x
1014 W(b) __armv4_mpu_cache_on
1015 W(b) __armv4_mpu_cache_off
1016 W(b) __armv4_mpu_cache_flush
1018 .word 0x41069260 @ ARM926EJ-S (v5TEJ)
1020 W(b) __arm926ejs_mmu_cache_on
1021 W(b) __armv4_mmu_cache_off
1022 W(b) __armv5tej_mmu_cache_flush
1024 .word 0x00007000 @ ARM7 IDs
1033 @ Everything from here on will be the new ID system.
1035 .word 0x4401a100 @ sa110 / sa1100
1037 W(b) __armv4_mmu_cache_on
1038 W(b) __armv4_mmu_cache_off
1039 W(b) __armv4_mmu_cache_flush
1041 .word 0x6901b110 @ sa1110
1043 W(b) __armv4_mmu_cache_on
1044 W(b) __armv4_mmu_cache_off
1045 W(b) __armv4_mmu_cache_flush
1048 .word 0xffffff00 @ PXA9xx
1049 W(b) __armv4_mmu_cache_on
1050 W(b) __armv4_mmu_cache_off
1051 W(b) __armv4_mmu_cache_flush
1053 .word 0x56158000 @ PXA168
1055 W(b) __armv4_mmu_cache_on
1056 W(b) __armv4_mmu_cache_off
1057 W(b) __armv5tej_mmu_cache_flush
1059 .word 0x56050000 @ Feroceon
1061 W(b) __armv4_mmu_cache_on
1062 W(b) __armv4_mmu_cache_off
1063 W(b) __armv5tej_mmu_cache_flush
1065 #ifdef CONFIG_CPU_FEROCEON_OLD_ID
1066 /* this conflicts with the standard ARMv5TE entry */
1067 .long 0x41009260 @ Old Feroceon
1069 b __armv4_mmu_cache_on
1070 b __armv4_mmu_cache_off
1071 b __armv5tej_mmu_cache_flush
1074 .word 0x66015261 @ FA526
1076 W(b) __fa526_cache_on
1077 W(b) __armv4_mmu_cache_off
1078 W(b) __fa526_cache_flush
1080 @ These match on the architecture ID
1082 .word 0x00020000 @ ARMv4T
1084 W(b) __armv4_mmu_cache_on
1085 W(b) __armv4_mmu_cache_off
1086 W(b) __armv4_mmu_cache_flush
1088 .word 0x00050000 @ ARMv5TE
1090 W(b) __armv4_mmu_cache_on
1091 W(b) __armv4_mmu_cache_off
1092 W(b) __armv4_mmu_cache_flush
1094 .word 0x00060000 @ ARMv5TEJ
1096 W(b) __armv4_mmu_cache_on
1097 W(b) __armv4_mmu_cache_off
1098 W(b) __armv5tej_mmu_cache_flush
1100 .word 0x0007b000 @ ARMv6
1102 W(b) __armv6_mmu_cache_on
1103 W(b) __armv4_mmu_cache_off
1104 W(b) __armv6_mmu_cache_flush
1106 .word 0x000f0000 @ new CPU Id
1108 W(b) __armv7_mmu_cache_on
1109 W(b) __armv7_mmu_cache_off
1110 W(b) __armv7_mmu_cache_flush
1112 .word 0 @ unrecognised type
1121 .size proc_types, . - proc_types
1124 * If you get a "non-constant expression in ".if" statement"
1125 * error from the assembler on this line, check that you have
1126 * not accidentally written a "b" instruction where you should
1127 * have written W(b).
1129 .if (. - proc_types) % PROC_ENTRY_SIZE != 0
1130 .error "The size of one or more proc_types entries is wrong."
 * Turn off the cache and MMU. ARMv3 does not support
1135 * reading the control register, but ARMv4 does.
1138 * r0, r1, r2, r3, r9, r12 corrupted
1139 * This routine must preserve:
1143 cache_off: mov r3, #12 @ cache_off function
1146 __armv4_mpu_cache_off:
1147 mrc p15, 0, r0, c1, c0
1149 mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
1151 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
1152 mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
1153 mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
1156 __armv3_mpu_cache_off:
1157 mrc p15, 0, r0, c1, c0
1159 mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
1161 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
1164 __armv4_mmu_cache_off:
1166 mrc p15, 0, r0, c1, c0
1168 mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
1170 mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
1171 mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
1175 __armv7_mmu_cache_off:
1176 mrc p15, 0, r0, c1, c0
1182 mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
1185 mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
1187 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
1188 mcr p15, 0, r0, c7, c10, 4 @ DSB
1189 mcr p15, 0, r0, c7, c5, 4 @ ISB
1193 * Clean and flush the cache to maintain consistency.
1196 * r0 = start address
1197 * r1 = end address (exclusive)
1199 * r1, r2, r3, r9, r10, r11, r12 corrupted
1200 * This routine must preserve:
1209 __armv4_mpu_cache_flush:
1214 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
1215 mov r1, #7 << 5 @ 8 segments
1216 1: orr r3, r1, #63 << 26 @ 64 entries
1217 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
1218 subs r3, r3, #1 << 26
1219 bcs 2b @ entries 63 to 0
1220 subs r1, r1, #1 << 5
1221 bcs 1b @ segments 7 to 0
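/*
 * The nested loop walks 8 segments of 64 entries each; the index
 * word encodes the entry in bits [31:26] and the segment in bits
 * [7:5]. Roughly (clean_inv_dindex() is a stand-in for the
 * c7, c14, 2 operation):
 *
 *	for (seg = 7; seg >= 0; seg--)
 *		for (idx = 63; idx >= 0; idx--)
 *			clean_inv_dindex(idx << 26 | seg << 5);
 */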
1224 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
1225 mcr p15, 0, ip, c7, c10, 4 @ drain WB
1228 __fa526_cache_flush:
1232 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
1233 mcr p15, 0, r1, c7, c5, 0 @ flush I cache
1234 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1237 __armv6_mmu_cache_flush:
1240 mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
1241 mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
1242 mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
1243 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1246 __armv7_mmu_cache_flush:
1247 enable_cp15_barriers r10
1250 mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
1251 tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
1254 mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
1257 dcache_line_size r1, r2 @ r1 := dcache min line size
1258 sub r2, r1, #1 @ r2 := line size mask
1259 bic r0, r0, r2 @ round down start to line size
1260 sub r11, r11, #1 @ end address is exclusive
1261 bic r11, r11, r2 @ round down end to line size
1262 0: cmp r0, r11 @ finished?
1264 mcr p15, 0, r0, c7, c14, 1 @ Dcache clean/invalidate by VA
1268 mcr p15, 0, r10, c7, c10, 4 @ DSB
1269 mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
1270 mcr p15, 0, r10, c7, c10, 4 @ DSB
1271 mcr p15, 0, r10, c7, c5, 4 @ ISB
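/*
 * The by-VA loop above is the standard range clean+invalidate,
 * approximately (dccimvac() standing in for the DCCIMVAC op):
 *
 *	for (va = start & ~(line - 1); va < end; va += line)
 *		dccimvac(va);
 *
 * followed by the I-cache/BTB invalidate and DSB/ISB barriers.
 */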
1274 __armv5tej_mmu_cache_flush:
1277 1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache
1279 mcr p15, 0, r0, c7, c5, 0 @ flush I cache
1280 mcr p15, 0, r0, c7, c10, 4 @ drain WB
1283 __armv4_mmu_cache_flush:
1286 mov r2, #64*1024 @ default: 32K dcache size (*2)
1287 mov r11, #32 @ default: 32 byte line size
1288 mrc p15, 0, r3, c0, c0, 1 @ read cache type
1289 teq r3, r9 @ cache ID register present?
1294 mov r2, r2, lsl r1 @ base dcache size *2
1295 tst r3, #1 << 14 @ test M bit
1296 addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1
1300 mov r11, r11, lsl r3 @ cache line size in bytes
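/*
 * Decoding of the pre-v7 cache type register used above, in C terms
 * (field positions per the v4 CTR layout; a sketch only):
 *
 *	loop_span = 1024 << ((ctype >> 18) & 7);  twice the D-cache size
 *	if (ctype & (1 << 14))                    M bit: +50%
 *		loop_span += loop_span / 2;
 *	line = 8 << ((ctype >> 12) & 3);
 */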
1303 bic r1, r1, #63 @ align to longest cache line
1306 ARM( ldr r3, [r1], r11 ) @ s/w flush D cache
1307 THUMB( ldr r3, [r1] ) @ s/w flush D cache
1308 THUMB( add r1, r1, r11 )
1312 mcr p15, 0, r1, c7, c5, 0 @ flush I cache
1313 mcr p15, 0, r1, c7, c6, 0 @ flush D cache
1314 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1317 __armv3_mmu_cache_flush:
1318 __armv3_mpu_cache_flush:
1322 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
1326 * Various debugging routines for printing hex characters and
1327 * memory, which again must be relocatable.
1331 .type phexbuf,#object
1333 .size phexbuf, . - phexbuf
1335 @ phex corrupts {r0, r1, r2, r3}
1336 phex: adr r3, phexbuf
1350 @ puts corrupts {r0, r1, r2, r3}
1351 puts: loadsp r3, r2, r1
1352 1: ldrb r2, [r0], #1
1355 2: writeb r2, r3, r1
1365 @ putc corrupts {r0, r1, r2, r3}
1372 @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
1373 memdump: mov r12, r0
1376 2: mov r0, r11, lsl #2
1384 ldr r0, [r12, r11, lsl #2]
1404 #ifdef CONFIG_ARM_VIRT_EXT
1406 __hyp_reentry_vectors:
1409 #ifdef CONFIG_EFI_STUB
1410 W(b) __enter_kernel_from_hyp @ hvc from HYP
1416 W(b) __enter_kernel @ hyp
1419 #endif /* CONFIG_ARM_VIRT_EXT */
1422 mov r0, #0 @ must be 0
1423 mov r1, r7 @ restore architecture number
1424 mov r2, r8 @ restore atags pointer
1425 ARM( mov pc, r4 ) @ call kernel
1426 M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
1427 THUMB( bx r4 ) @ entry point is always ARM for A/R classes
1431 #ifdef CONFIG_EFI_STUB
1432 __enter_kernel_from_hyp:
1433 mrc p15, 4, r0, c1, c0, 0 @ read HSCTLR
1434 bic r0, r0, #0x5 @ disable MMU and caches
1435 mcr p15, 4, r0, c1, c0, 0 @ write HSCTLR
1439 ENTRY(efi_enter_kernel)
1440 mov r4, r0 @ preserve image base
1441 mov r8, r1 @ preserve DT pointer
1443 ARM( adrl r0, call_cache_fn )
1444 THUMB( adr r0, call_cache_fn )
1445 adr r1, 0f @ clean the region of code we
1446 bl cache_clean_flush @ may run with the MMU off
1448 #ifdef CONFIG_ARM_VIRT_EXT
1450 @ The EFI spec does not support booting on ARM in HYP mode,
1451 @ since it mandates that the MMU and caches are on, with all
1452 @ 32-bit addressable DRAM mapped 1:1 using short descriptors.
1454 @ While the EDK2 reference implementation adheres to this,
1455 @ U-Boot might decide to enter the EFI stub in HYP mode
1456 @ anyway, with the MMU and caches either on or off.
1458 mrs r0, cpsr @ get the current mode
1459 msr spsr_cxsf, r0 @ record boot mode
1460 and r0, r0, #MODE_MASK @ are we running in HYP mode?
1464 mrc p15, 4, r1, c1, c0, 0 @ read HSCTLR
1465 tst r1, #0x1 @ MMU enabled at HYP?
1469 @ When running in HYP mode with the caches on, we're better
1470 @ off just carrying on using the cached 1:1 mapping that the
1471 @ firmware provided. Set up the HYP vectors so HVC instructions
1472 @ issued from HYP mode take us to the correct handler code. We
1473 @ will disable the MMU before jumping to the kernel proper.
1475 ARM( bic r1, r1, #(1 << 30) ) @ clear HSCTLR.TE
1476 THUMB( orr r1, r1, #(1 << 30) ) @ set HSCTLR.TE
1477 mcr p15, 4, r1, c1, c0, 0
1478 adr r0, __hyp_reentry_vectors
1479 mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR)
1484 @ When running in HYP mode with the caches off, we need to drop
1485 @ into SVC mode now, and let the decompressor set up its cached
1486 @ 1:1 mapping as usual.
1488 1: mov r9, r4 @ preserve image base
1489 bl __hyp_stub_install @ install HYP stub vectors
1490 safe_svcmode_maskall r1 @ drop to SVC mode
1491 msr spsr_cxsf, r0 @ record boot mode
1492 orr r4, r9, #1 @ restore image base and set LSB
1496 mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
1497 tst r0, #0x1 @ MMU enabled?
1498 orreq r4, r4, #1 @ set LSB if not
1501 mov r0, r8 @ DT start
1502 add r1, r8, r2 @ DT end
1503 bl cache_clean_flush
1505 adr r0, 0f @ switch to our stack
1509 mov r5, #0 @ appended DTB size
1510 mov r7, #0xFFFFFFFF @ machine ID
1512 ENDPROC(efi_enter_kernel)
1513 0: .long .L_user_stack_end - .
1517 .section ".stack", "aw", %nobits
1518 .L_user_stack: .space 4096