2 * linux/arch/arm/kernel/head.S
4 * Copyright (C) 1994-2002 Russell King
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * Kernel startup code for all 32-bit CPUs
12 #include <linux/config.h>
13 #include <linux/linkage.h>
14 #include <linux/init.h>
16 #include <asm/assembler.h>
17 #include <asm/domain.h>
18 #include <asm/mach-types.h>
19 #include <asm/procinfo.h>
20 #include <asm/ptrace.h>
21 #include <asm/constants.h>
22 #include <asm/system.h>
/*
 * Byte offsets into the proc_info_list structure (declared in
 * <asm/procinfo.h>, filled in by arch/arm/mm/proc-*.S).  The startup
 * code below reaches these fields through the procinfo base pointer
 * returned by __lookup_processor_type (kept in r10): the MMU section
 * flags and the CPU-specific init function entry point.
 */
#define PROCINFO_MMUFLAGS	8
#define PROCINFO_INITFUNC	12

/*
 * Byte offsets into the machine_desc structure returned by
 * __lookup_machine_type (kept in r8): machine type number, physical
 * RAM base, physical IO base, virtual IO page offset, and the
 * human-readable machine name.
 *
 * NOTE(review): the stray leading listing numbers removed here
 * ("24 #define ...", "25 #define ...") are a paste artifact that
 * appears on every line of this file; they must be stripped
 * everywhere before the file will assemble.
 */
#define MACHINFO_TYPE		0
#define MACHINFO_PHYSRAM	4
#define MACHINFO_PHYSIO		8
#define MACHINFO_PGOFFIO	12
#define MACHINFO_NAME		16
@ NOTE(review): this span is an incomplete listing -- the embedded
@ original line numbers jump (33, 35, 44, 49, 51, 59, ...), so the
@ "#endif" lines of the #if checks, the "#else" of the
@ CONFIG_XIP_KERNEL conditional, the /* */ comment delimiters, the
@ body of the first pgtbl macro and both ".endm" lines are missing
@ here.  Verify against the full source before assembling.
@
@ Two variants of the pgtbl macro are defined (one per config):
@ both must leave the physical address of swapper_pg_dir in \rd.
33 #ifndef CONFIG_XIP_KERNEL
35 * We place the page tables 16K below TEXTADDR. Therefore, we must make sure
36 * that TEXTADDR is correctly set. Currently, we expect the least significant
37 * 16 bits to be 0x8000, but we could probably relax this restriction to
38 * TEXTADDR >= PAGE_OFFSET + 0x4000
40 * Note that swapper_pg_dir is the virtual address of the page tables, and
41 * pgtbl gives us a position-independent reference to these tables. We can
42 * do this because stext == TEXTADDR
44 #if (TEXTADDR & 0xffff) != 0x8000
45 #error TEXTADDR must start at 0xXXXX8000
@ swapper_pg_dir = virtual address of the 16K level-1 page table,
@ placed 16K (0x4000) below the kernel text.
49 .equ swapper_pg_dir, TEXTADDR - 0x4000
@ NOTE(review): the body of this macro (presumably a PC-relative
@ computation, per the comment at original lines 40-42) is missing.
51 .macro pgtbl, rd, phys
@ --- CONFIG_XIP_KERNEL variant (the "#else" line is missing) ---
59 * We place the page tables 16K below DATAADDR. Therefore, we must make sure
60 * that DATAADDR is correctly set. Currently, we expect the least significant
61 * 16 bits to be 0x8000, but we could probably relax this restriction to
62 * DATAADDR >= PAGE_OFFSET + 0x4000
64 * Note that pgtbl is meant to return the physical address of swapper_pg_dir.
65 * We can't make it relative to the kernel position in this case since
66 * the kernel can physically be anywhere.
68 #if (DATAADDR & 0xffff) != 0x8000
69 #error DATAADDR must start at 0xXXXX8000
73 .equ swapper_pg_dir, DATAADDR - 0x4000
75 .macro pgtbl, rd, phys
@ XIP kernel: data is in RAM even though text is not, so the page
@ table physical address is an absolute constant (virtual address
@ of swapper_pg_dir minus the virt->phys offset).
76 ldr \rd, =((DATAADDR - 0x4000) - VIRT_OFFSET)
82 * Kernel startup entry point.
83 * ---------------------------
85 * This is normally called from the decompressor code. The requirements
86 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
89 * This code is mostly position independent, so if you link the kernel at
90 * 0xc0008000, you call this at __pa(0xc0008000).
92 * See linux/arch/arm/tools/mach-types for the complete list of machine
95 * We're trying to keep crap to a minimum; DO NOT add any machine specific
96 * crap here - that's what the boot loader (or in extreme, well justified
97 * circumstances, zImage) is for.
100 .type stext, %function
@ NOTE(review): the ENTRY(stext) label line itself (original line
@ 101) is missing from this excerpt, along with a few other lines
@ (gaps in the embedded numbering).
@
@ Register roles from here on:
@   r1  = machine architecture number from the boot loader
@   r9  = CPU id (from __lookup_processor_type)
@   r10 = proc_info_list pointer, indexed with PROCINFO_* offsets
@   r8  = machine_desc pointer, indexed with MACHINFO_* offsets
102 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
@ (also disables IRQ and FIQ via PSR_I_BIT | PSR_F_BIT)
104 bl __lookup_processor_type @ r5=procinfo r9=cpuid
105 movs r10, r5 @ invalid processor (r5=0)?
106 beq __error_p @ yes, error 'p'
107 bl __lookup_machine_type @ r5=machinfo
108 movs r8, r5 @ invalid machine (r5=0)?
109 beq __error_a @ yes, error 'a'
110 bl __create_page_tables
113 * The following calls CPU specific code in a position independent
114 * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
115 * xxx_proc_info structure selected by __lookup_machine_type
116 * above. On return, the CPU will be ready for the MMU to be
117 * turned on, and r0 will hold the CPU control register value.
@ r13 carries the *virtual* address to jump to once the MMU is on;
@ lr is set so the CPU-specific init function "returns" into
@ __enable_mmu (position-independent, via adr).
119 ldr r13, __switch_data @ address to jump to after
120 @ mmu has been enabled
121 adr lr, __enable_mmu @ return (PIC) address
122 add pc, r10, #PROCINFO_INITFUNC
@ Literal table of absolute (virtual) addresses consumed by
@ __mmap_switched: first the branch target, then the data/bss
@ bounds loaded with "ldmia r3!, {r4, r5, r6, r7}", then the
@ variables and initial stack pointer loaded with
@ "ldmia r3, {r4, r5, r6, sp}".
@ NOTE(review): the "__switch_data:" label line (original line 125)
@ and an entry between __bss_start and processor_id (original line
@ 130 -- presumably __bss_stop, for r7) are missing here; confirm
@ against the full source.
124 .type __switch_data, %object
126 .long __mmap_switched
127 .long __data_loc @ r4
128 .long __data_start @ r5
129 .long __bss_start @ r6
131 .long processor_id @ r4
132 .long __machine_arch_type @ r5
133 .long cr_alignment @ r6
134 .long init_thread_union+8192 @ sp
137 * The following fragment of code is executed with the MMU on, and uses
138 * absolute addresses; this is not position independent.
140 * r0 = cp#15 control register
144 .type __mmap_switched, %function
@ NOTE(review): the "__mmap_switched:" label, the data-copy and
@ BSS-clear loop bodies (original lines 150-159), and the final
@ branch to the C entry point are missing from this excerpt.
@ Copies the data segment if it was loaded at a different address
@ (r4 = load address, r5 = link address), clears BSS, then saves
@ the CPU id (r9), machine type (r1) and control register values
@ into kernel variables before loading the initial sp.
146 adr r3, __switch_data + 4 @ skip the __mmap_switched entry itself
148 ldmia r3!, {r4, r5, r6, r7} @ r4=__data_loc r5=__data_start r6=__bss_start
149 cmp r4, r5 @ Copy data segment if needed
155 mov fp, #0 @ Clear BSS (and zero fp)
160 ldmia r3, {r4, r5, r6, sp} @ variable addresses + initial stack
161 str r9, [r4] @ Save processor ID
162 str r1, [r5] @ Save machine type
163 bic r4, r0, #CR_A @ Clear 'A' bit
164 stmia r6, {r0, r4} @ Save control register values
170 * Setup common bits before finally enabling the MMU. Essentially
171 * this is just loading the page table pointer and domain access
174 .type __enable_mmu, %function
@ NOTE(review): the label line and the bodies of all four #ifdef
@ blocks below (plus their #endif lines) are missing from this
@ excerpt -- presumably each one sets or clears a bit in the
@ control register value held in r0; confirm against the full
@ source.  On entry r0 = control register value from the CPU init
@ function, r4 = physical page table address from
@ __create_page_tables.
176 #ifdef CONFIG_ALIGNMENT_TRAP
181 #ifdef CONFIG_CPU_DCACHE_DISABLE
184 #ifdef CONFIG_CPU_BPREDICT_DISABLE
187 #ifdef CONFIG_CPU_ICACHE_DISABLE
@ Grant full (manager) access to the user/kernel/table domains and
@ client access to the IO domain, then point the MMU at the page
@ tables.  CP15 c3 = domain access control, c2 = translation table
@ base.
190 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
191 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
192 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
193 domain_val(DOMAIN_IO, DOMAIN_CLIENT))
194 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
195 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
199 * Enable the MMU. This completely changes the structure of the visible
200 * memory space. You will not be able to trace execution through this.
201 * If you have an enquiry about this, *please* check the linux-arm-kernel
202 * mailing list archives BEFORE sending another post to the list.
204 * r0 = cp#15 control register
205 * r13 = *virtual* address to jump to upon completion
207 * other registers depend on the function called upon completion
210 .type __turn_mmu_on, %function
@ NOTE(review): only the .type directive and two instructions are
@ visible; the label, any alignment directives, and the final jump
@ through r13 are missing from this excerpt.
@ CP15 c1 write enables the MMU; the c0 (id register) read-back is
@ presumably there to stall until the write takes effect -- confirm
@ against the full source.
213 mcr p15, 0, r0, c1, c0, 0 @ write control reg
214 mrc p15, 0, r3, c0, c0, 0 @ read id reg
222 * Setup the initial page tables. We only setup the barest
223 * amount which are required to get the kernel running, which
224 * generally means mapping in the kernel code.
231 * r0, r3, r5, r6, r7 corrupted
232 * r4 = physical page table address
234 .type __create_page_tables, %function
@ NOTE(review): many lines are missing from this excerpt (gaps in
@ the embedded numbering): the 16K-clear loop body (original lines
@ 241-251), several branches and loads, the #else/#endif lines of
@ the conditional regions below, and the final return.  All
@ mappings are 1MB level-1 sections: "ldr/str r3, [r4, index<<2]"
@ with r3 = mmuflags | section base, since each of the 4096 level-1
@ entries is 4 bytes and covers 1MB.
235 __create_page_tables:
236 ldr r5, [r8, #MACHINFO_PHYSRAM] @ physram
237 pgtbl r4, r5 @ page table address
240 * Clear the 16K level 1 swapper page table
252 ldr r7, [r10, #PROCINFO_MMUFLAGS] @ mmuflags
255 * Create identity mapping for first MB of kernel to
256 * cater for the MMU enable. This identity mapping
257 * will be removed by paging_init(). We use our current program
258 * counter to determine corresponding section base address.
260 mov r6, pc, lsr #20 @ start of kernel section
261 orr r3, r7, r6, lsl #20 @ flags + kernel base
262 str r3, [r4, r6, lsl #2] @ identity mapping
265 * Now setup the pagetables for our kernel direct
266 * mapped region. We round TEXTADDR down to the
267 * nearest megabyte boundary. It is assumed that
268 * the kernel fits within 4 contigous 1MB sections.
@ (TEXTADDR >> 20) << 2 = entry offset, computed in two halves so
@ each half fits an immediate operand; the ! writeback leaves r0
@ pointing at the entry just written.
270 add r0, r4, #(TEXTADDR & 0xff000000) >> 18 @ start of kernel
271 str r3, [r0, #(TEXTADDR & 0x00f00000) >> 18]!
@ NOTE(review): the instructions that advance r3 by 1MB between
@ these stores (original lines 272, 274, 276) are missing here.
273 str r3, [r0, #4]! @ KERNEL + 1MB
275 str r3, [r0, #4]! @ KERNEL + 2MB
277 str r3, [r0, #4] @ KERNEL + 3MB
280 * Then map first 1MB of ram in case it contains our boot params.
282 add r0, r4, #VIRT_OFFSET >> 18
286 #ifdef CONFIG_XIP_KERNEL
288 * Map some ram to cover our .data and .bss areas.
289 * Mapping 3MB should be plenty.
@ NOTE(review): the loads that set up r3/r6 before these adds
@ (original lines 290-292) are missing.
293 add r0, r0, r3, lsl #2
294 add r6, r6, r3, lsl #20
296 add r6, r6, #(1 << 20)
298 add r6, r6, #(1 << 20)
@ Debug-only IO mappings below are made uncached/unbuffered:
302 bic r7, r7, #0x0c @ turn off cacheable
303 @ and bufferable bits
304 #ifdef CONFIG_DEBUG_LL
306 * Map in IO space for serial debugging.
307 * This allows debug messages to be output
308 * via a serial console before paging_init.
310 ldr r3, [r8, #MACHINFO_PGOFFIO]
312 rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
313 cmp r3, #0x0800 @ limit to 512MB
316 ldr r3, [r8, #MACHINFO_PHYSIO]
322 #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
324 * If we're using the NetWinder, we need to map in
325 * the 16550-type serial port for the debug messages
@ teq/teqne: fall through only when r1 matches neither machine id
@ (the guarded mapping code after line 330 is missing here).
327 teq r1, #MACH_TYPE_NETWINDER
328 teqne r1, #MACH_TYPE_CATS
330 add r0, r4, #0x3fc0 @ ff000000
339 #ifdef CONFIG_ARCH_RPC
341 * Map in screen at 0x02000000 & SCREEN2_BASE
342 * Similar reasons here - for debug. This is
343 * only for Acorn RiscPC architectures.
345 add r0, r4, #0x80 @ 02000000
349 add r0, r4, #0x3600 @ d8000000
358 * Exception handling. Something went wrong and we can't proceed. We
359 * ought to tell the user, but since we don't have any guarantee that
360 * we're even running on the right architecture, we do virtually nothing.
362 * If CONFIG_DEBUG_LL is set we try to print out something about the error
363 * and hope for the best (useful if bootloader fails to pass a proper
364 * machine ID for example).
@ NOTE(review): large parts of all three error handlers are missing
@ from this excerpt: the labels, the print/printhex calls, the
@ loop branches, the #endif lines and the fall-through into
@ __error.  Only the skeleton below is visible.
367 .type __error_p, %function
369 #ifdef CONFIG_DEBUG_LL
373 str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
@ __error_a: bad machine ID.  Under CONFIG_DEBUG_LL it walks the
@ machine_desc list (same virt->phys fixup technique as
@ __lookup_machine_type below) printing each supported machine.
377 .type __error_a, %function
379 #ifdef CONFIG_DEBUG_LL
380 mov r4, r1 @ preserve machine ID
388 ldmia r3, {r4, r5, r6} @ get machine desc list
389 sub r4, r3, r4 @ get offset between virt&phys
390 add r5, r5, r4 @ convert virt addresses to
391 add r6, r6, r4 @ physical address space
392 1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
396 ldr r0, [r5, #MACHINFO_NAME] @ get machine name
401 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
407 str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
408 str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
409 str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
413 .type __error, %function
415 #ifdef CONFIG_ARCH_RPC
417 * Turn the screen red on a error - RiscPC only.
@ Replicate the (missing) byte value in r3 across all four bytes
@ of the word before filling the screen memory.
421 orr r3, r3, r3, lsl #8
422 orr r3, r3, r3, lsl #16
433 * Read processor ID register (CP#15, CR0), and look up in the linker-built
434 * supported processor list. Note that we can't use the absolute addresses
435 * for the __proc_info lists since we aren't running with the MMU on
436 * (and therefore, we are not in the correct address space). We have to
437 * calculate the offset.
440 * r3, r4, r6 corrupted
441 * r5 = proc_info pointer in physical address space
444 .type __lookup_processor_type, %function
@ NOTE(review): the instruction loading r3 before the ldmda
@ (original line 446, presumably an adr to the pointer table at
@ original lines 475-479) is missing, as are the compare/branch
@ lines of the scan loop (454-455, 457) and the return.
@ r5/r6 = __proc_info begin/end (virtual), r9 = the table's own
@ virtual address; r3 - r9 then gives the virt->phys offset used
@ to relocate r5/r6 into the physical address space.
445 __lookup_processor_type:
447 ldmda r3, {r5, r6, r9}
448 sub r3, r3, r9 @ get offset between virt&phys
449 add r5, r5, r3 @ convert virt addresses to
450 add r6, r6, r3 @ physical address space
451 mrc p15, 0, r9, c0, c0 @ get processor id
@ Scan loop: each proc_info_list entry starts with {value, mask};
@ a CPU matches when (id & mask) == value.
452 1: ldmia r5, {r3, r4} @ value, mask
453 and r4, r4, r9 @ mask wanted bits
456 add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
459 mov r5, #0 @ unknown processor
463 * This provides a C-API version of the above function.
@ Saves the registers __lookup_processor_type corrupts (r4-r6, r9)
@ so the AAPCS callee-saved set is preserved for the C caller;
@ NOTE(review): the instruction between the bl and the ldmfd
@ (original line 468, presumably moving the result r5 into r0) is
@ missing from this excerpt.
465 ENTRY(lookup_processor_type)
466 stmfd sp!, {r4 - r6, r9, lr}
467 bl __lookup_processor_type
469 ldmfd sp!, {r4 - r6, r9, pc}
472 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
473 * more information about the __proc_info and __arch_info structures.
@ Linker-provided bounds of the .proc.info and .arch.info sections;
@ loaded (together with the table's own address) by the two lookup
@ routines to scan the lists with the MMU off.
@ NOTE(review): the numeric labels in front of these tables
@ (original lines 474 and 477, presumably "3:" and "4:") are
@ missing from this excerpt.
475 .long __proc_info_begin
476 .long __proc_info_end
478 .long __arch_info_begin
479 .long __arch_info_end
482 * Lookup machine architecture in the linker-build list of architectures.
483 * Note that we can't use the absolute addresses for the __arch_info
484 * lists since we aren't running with the MMU on (and therefore, we are
485 * not in the correct address space). We have to calculate the offset.
487 * r1 = machine architecture number
489 * r3, r4, r6 corrupted
490 * r5 = mach_info pointer in physical address space
492 .type __lookup_machine_type, %function
@ NOTE(review): the adr loading r3 (original line 494), the
@ branches of the scan loop (original lines 501, 503-504) and the
@ return are missing from this excerpt.
@ r4 = the table's own virtual address, r5/r6 = __arch_info
@ begin/end (virtual); r3 - r4 gives the virt->phys offset.
493 __lookup_machine_type:
495 ldmia r3, {r4, r5, r6}
496 sub r3, r3, r4 @ get offset between virt&phys
497 add r5, r5, r3 @ convert virt addresses to
498 add r6, r6, r3 @ physical address space
@ Scan loop: match the machine type number against the one the
@ boot loader passed in r1.
499 1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
500 teq r3, r1 @ matches loader number?
502 add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
505 mov r5, #0 @ unknown machine
509 * This provides a C-API version of the above function.
@ Saves the registers __lookup_machine_type corrupts (r4-r6) so
@ the AAPCS callee-saved set is preserved for the C caller;
@ NOTE(review): the lines between 512/514 and 514/516 (original
@ 513 and 515, presumably moving r1 into place and the result r5
@ into r0) are missing from this excerpt.
511 ENTRY(lookup_machine_type)
512 stmfd sp!, {r4 - r6, lr}
514 bl __lookup_machine_type
516 ldmfd sp!, {r4 - r6, pc}