1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
20 #include <linux/pgtable.h>
24 #include <asm/cputable.h>
25 #include <asm/cache.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/ptrace.h>
31 #include <asm/kvm_book3s_asm.h>
32 #include <asm/export.h>
33 #include <asm/feature-fixups.h>
/*
 * LOAD_BAT(n, reg, RA, RB): load the IBATn/DBATn pair from the
 * four-word table entry at offset n*16 from `reg`.
 * The upper (U) BATs are written first with RA -- presumably zeroed
 * by a preceding `li RA,0` not visible in this chunk (TODO confirm) --
 * so the valid bits are clear while the pair is being changed.
 * RA and RB are scratch registers.
 */
37 #define LOAD_BAT(n, reg, RA, RB) \
38 /* see the comment for clear_bats() -- Cort */ \
40 mtspr SPRN_IBAT##n##U,RA; \
41 mtspr SPRN_DBAT##n##U,RA; \
42 lwz RA,(n*16)+0(reg); \
43 lwz RB,(n*16)+4(reg); \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_IBAT##n##L,RB; \
46 lwz RA,(n*16)+8(reg); \
47 lwz RB,(n*16)+12(reg); \
48 mtspr SPRN_DBAT##n##U,RA; \
49 mtspr SPRN_DBAT##n##L,RB
/* stabs records: give debuggers the source directory and file name */
52 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
53 .stabs "head_book3s_32.S",N_SO,0,0,0f
58 * _start is defined this way because the XCOFF loader in the OpenFirmware
59 * on the powermac expects the entry point to be a procedure descriptor.
63 * These are here for legacy reasons, the kernel used to
64 * need to look like a coff function entry for the pmac
65 * but we're always started by some kind of bootloader now.
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
69 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
73 * Enter here with the kernel text, data and bss loaded starting at
74 * 0, running with virtual == physical mapping.
75 * r5 points to the prom entry point (the client interface handler
76 * address). Address translation is turned on, with the prom
77 * managing the hash table. Interrupts are disabled. The stack
78 * pointer (r1) points to just below the end of the half-meg region
79 * from 0x380000 - 0x400000, which is mapped in already.
81 * If we are booted from MacOS via BootX, we enter with the kernel
82 * image loaded somewhere, and the following values in registers:
83 * r3: 'BooX' (0x426f6f58)
84 * r4: virtual address of boot_infos_t
88 * This is jumped to on prep systems right after the kernel is relocated
89 * to its proper place in memory by the boot loader. The expected layout
91 * r3: ptr to residual data
92 * r4: initrd_start or if no initrd then 0
93 * r5: initrd_end - unused if r4 is 0
94 * r6: Start of command line string
95 * r7: End of command line string
97 * This just gets a minimal mmu environment setup so we can call
98 * start_here() to do the real work.
105 * We have to do any OF calls before we map ourselves to KERNELBASE,
106 * because OF may have I/O devices mapped into that area
107 * (particularly on CHRP).
112 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
113 /* find out where we are now */
/* PC-relative discovery of our physical load address: the kernel may
   not be running at its link address yet, so derive _stext's runtime
   address from the link register set by the branch to 0: */
115 0: mflr r8 /* r8 = runtime addr here */
116 addis r8,r8,(_stext - 0b)@ha
117 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
119 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
121 /* We never return. We also hit that trap if trying to boot
122 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
126 * Check for BootX signature when supporting PowerMac and branch to
127 * appropriate trampoline if it's present
129 #ifdef CONFIG_PPC_PMAC
136 #endif /* CONFIG_PPC_PMAC */
/* r31 keeps the flattened device tree pointer across early_init() */
138 1: mr r31,r3 /* save device tree ptr */
142 * early_init() does the early machine identification and does
143 * the necessary low-level setup and clears the BSS
144 * -- Cort <cort@fsmlabs.com>
148 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
149 * the physical address we are running at, returned by early_init()
157 bl load_segment_registers
158 BEGIN_MMU_FTR_SECTION
161 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
162 #if defined(CONFIG_BOOTX_TEXT)
165 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
168 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
169 bl setup_usbgecko_bat
173 * Call setup_cpu for CPU 0 and initialize 6xx Idle
177 bl call_setup_cpu /* Call setup_cpu for this CPU */
183 * We need to run with _start at physical address 0.
184 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
185 * the exception vectors at 0 (and therefore this copy
186 * overwrites OF's exception vectors with our own).
187 * The MMU is off at this point.
/* Compare current physical base (r3 + KERNELBASE offset) against the
   link-time PHYSICAL_START to decide whether relocation is needed. */
191 addis r4,r3,KERNELBASE@h /* current address of _start */
192 lis r5,PHYSICAL_START@h
193 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
196 * we now have the 1st 16M of ram mapped with the bats.
197 * prep needs the mmu to be turned on here, but pmac already has it on.
198 * this shouldn't bother the pmac since it just gets turned on again
199 * as we jump to our code at KERNELBASE. -- Cort
200 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
201 * off, and in other cases, we now turn it off before changing BATs above.
/* Build an MSR with instruction/data relocation on and recoverable-
   interrupt set, point SRR0 at start_here, and rfi to it: the rfi
   both jumps and switches the MMU on atomically. */
205 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
208 ori r0,r0,start_here@l
210 RFI /* enables MMU */
213 * We need __secondary_hold as a place to hold the other cpus on
214 * an SMP machine, even when we are running a UP kernel.
216 . = 0xc0 /* for prep bootloader */
217 li r3,1 /* MTX only has 1 cpu */
218 .globl __secondary_hold
220 /* tell the master we're here */
221 stw r3,__secondary_hold_acknowledge@l(0)
224 /* wait until we're told to start */
227 /* our cpu # was at addr 0 - go */
228 mr r24,r3 /* cpu # */
232 #endif /* CONFIG_SMP */
/* Cells the boot CPU writes and secondaries spin on / acknowledge in */
234 .globl __secondary_hold_spinloop
235 __secondary_hold_spinloop:
237 .globl __secondary_hold_acknowledge
238 __secondary_hold_acknowledge:
242 /* core99 pmac starts the secondary here by changing the vector, and
243 putting it back to what it was (unknown_exception) when done. */
244 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
248 * On CHRP, this is complicated by the fact that we could get a
249 * machine check inside RTAS, and we have no guarantee that certain
250 * critical registers will have the values we expect. The set of
251 * registers that might have bad values includes all the GPRs
252 * and all the BATs. We indicate that we are in RTAS by putting
253 * a non-zero value, the address of the exception frame to use,
254 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
255 * and uses its value if it is non-zero.
256 * (Other exception handlers assume that r1 is a valid kernel stack
257 * pointer when we take an exception from supervisor mode.)
264 #ifdef CONFIG_PPC_CHRP
/* Fetch thread.rtas_sp: non-zero means the check hit inside RTAS */
265 mfspr r11, SPRN_SPRG_THREAD
266 lwz r11, RTAS_SP(r11)
269 #endif /* CONFIG_PPC_CHRP */
270 EXCEPTION_PROLOG_1 for_rtas=1
271 7: EXCEPTION_PROLOG_2
272 addi r3,r1,STACK_FRAME_OVERHEAD
273 #ifdef CONFIG_PPC_CHRP
274 #ifdef CONFIG_VMAP_STACK
275 mfspr r4, SPRN_SPRG_THREAD
280 beq cr1, machine_check_tramp
283 b machine_check_tramp
286 /* Data access exception. */
290 #ifdef CONFIG_VMAP_STACK
/* VMAP_STACK variant: stash r10/r11 in SPRG scratch, inspect DSISR to
   decide between hash-table PTE insertion and the C page-fault path. */
291 mtspr SPRN_SPRG_SCRATCH0,r10
292 mfspr r10, SPRN_SPRG_THREAD
293 BEGIN_MMU_FTR_SECTION
295 mfspr r10, SPRN_DSISR
297 #ifdef CONFIG_PPC_KUAP
298 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
300 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
302 mfspr r10, SPRN_SPRG_THREAD
304 .Lhash_page_dsi_cont:
307 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
308 mtspr SPRN_SPRG_SCRATCH1,r11
311 mfspr r11, SPRN_DSISR
315 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
318 andi. r11, r11, MSR_PR
321 b handle_page_fault_tramp_1
322 #else /* CONFIG_VMAP_STACK */
323 EXCEPTION_PROLOG handle_dar_dsisr=1
324 get_and_save_dar_dsisr_on_stack r4, r5, r11
325 BEGIN_MMU_FTR_SECTION
326 #ifdef CONFIG_PPC_KUAP
327 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
329 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
331 bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
332 rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
334 b handle_page_fault_tramp_1
336 b handle_page_fault_tramp_2
337 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
338 #endif /* CONFIG_VMAP_STACK */
340 /* Instruction access exception. */
344 #ifdef CONFIG_VMAP_STACK
/* VMAP_STACK variant: save scratch regs, then check SRR1 to decide
   between hash PTE insertion and the C page-fault handler. */
345 mtspr SPRN_SPRG_SCRATCH0,r10
346 mtspr SPRN_SPRG_SCRATCH1,r11
347 mfspr r10, SPRN_SPRG_THREAD
350 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
353 BEGIN_MMU_FTR_SECTION
354 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
356 .Lhash_page_isi_cont:
357 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
358 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
359 andi. r11, r11, MSR_PR
363 #else /* CONFIG_VMAP_STACK */
365 andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
366 beq 1f /* if so, try to put a PTE */
367 li r3,0 /* into the hash table */
368 mr r4,r12 /* SRR0 is fault address */
369 BEGIN_MMU_FTR_SECTION
371 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
372 #endif /* CONFIG_VMAP_STACK */
374 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
376 EXC_XFER_LITE(0x400, handle_page_fault)
378 /* External interrupt */
379 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
381 /* Alignment exception */
/* Save DAR/DSISR then branch to the out-of-line trampoline; the
   vector slot is too small to hold the full transfer sequence. */
385 EXCEPTION_PROLOG handle_dar_dsisr=1
386 save_dar_dsisr_on_stack r4, r5, r11
387 addi r3,r1,STACK_FRAME_OVERHEAD
388 b alignment_exception_tramp
390 /* Program check exception */
391 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
393 /* Floating-point unavailable */
399 * Certain Freescale cores don't have a FPU and treat fp instructions
400 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
403 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
406 bl load_up_fpu /* if from user, just load it up */
407 b fast_exception_return
408 1: addi r3,r1,STACK_FRAME_OVERHEAD
409 EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
412 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
414 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
415 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
423 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
424 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
427 * The Altivec unavailable trap is at 0x0f20. Foo.
428 * We effectively remap it to 0x3000.
429 * We include an altivec unavailable exception vector even if
430 * not configured for Altivec, so that you can't panic a
431 * non-altivec kernel running on a machine with altivec just
432 * by executing an altivec instruction.
443 * Handle TLB miss for instruction on 603/603e.
444 * Note: we get an alternate set of r0 - r3 to use automatically.
450 * r1: linux style pte ( later becomes ppc hardware pte )
451 * r2: ptr to linux-style pte
454 /* Get PTE (linux-style) and check access */
456 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
457 lis r1, TASK_SIZE@h /* check if kernel address */
460 mfspr r2, SPRN_SPRG_PGDIR
461 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
462 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
464 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
465 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
/* Two-level walk: index the pgd with the top 10 address bits, then
   the pte page with the next 10; r3 holds the faulting address. */
467 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
468 lwz r2,0(r2) /* get pmd entry */
469 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
470 beq- InstructionAddressInvalid /* return if no mapping */
471 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
472 lwz r0,0(r2) /* get linux-style pte */
473 andc. r1,r1,r0 /* check access & ~permission */
474 bne- InstructionAddressInvalid /* return if access not permitted */
475 /* Convert linux-style PTE to low word of PPC-style PTE */
476 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
477 ori r1, r1, 0xe06 /* clear out reserved bits */
478 andc r1, r0, r1 /* PP = user? 1 : 0 */
480 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
481 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
484 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
/* Fault exit: synthesize SRR1/DSISR/DAR as a real ISI would, restore
   the normal register set (leave TGPR mode) and fall into the handler. */
487 InstructionAddressInvalid:
489 rlwinm r1,r3,9,6,6 /* Get load/store bit */
492 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
493 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
496 mfspr r1,SPRN_IMISS /* Get failing address */
497 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
498 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
500 mtspr SPRN_DAR,r1 /* Set fault address */
501 mfmsr r0 /* Restore "normal" registers */
502 xoris r0,r0,MSR_TGPR>>16
503 mtcrf 0x80,r3 /* Restore CR0 */
508 * Handle TLB miss for DATA Load operation on 603/603e
514 * r1: linux style pte ( later becomes ppc hardware pte )
515 * r2: ptr to linux-style pte
518 /* Get PTE (linux-style) and check access */
520 lis r1, TASK_SIZE@h /* check if kernel address */
522 mfspr r2, SPRN_SPRG_PGDIR
523 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
525 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
526 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
/* Same two-level walk as the instruction-miss handler above */
527 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
528 lwz r2,0(r2) /* get pmd entry */
529 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
530 beq- DataAddressInvalid /* return if no mapping */
531 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
532 lwz r0,0(r2) /* get linux-style pte */
533 andc. r1,r1,r0 /* check access & ~permission */
534 bne- DataAddressInvalid /* return if access not permitted */
536 * NOTE! We are assuming this is not an SMP system, otherwise
537 * we would need to update the pte atomically with lwarx/stwcx.
539 /* Convert linux-style PTE to low word of PPC-style PTE */
540 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
541 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
542 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
543 ori r1,r1,0xe04 /* clear out reserved bits */
544 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
546 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
547 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
549 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
/* Software TLB way-selection (LRU) for parts that need it */
551 BEGIN_MMU_FTR_SECTION
553 mfspr r1,SPRN_SPRG_603_LRU
554 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
558 mtspr SPRN_SPRG_603_LRU,r1
560 rlwimi r2,r0,31-14,14,14
562 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/* Fault exit: build DSISR/DAR and hand off to the DSI handler */
567 rlwinm r1,r3,9,6,6 /* Get load/store bit */
570 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
572 mfspr r1,SPRN_DMISS /* Get failing address */
573 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
574 beq 20f /* Jump if big endian */
576 20: mtspr SPRN_DAR,r1 /* Set fault address */
577 mfmsr r0 /* Restore "normal" registers */
578 xoris r0,r0,MSR_TGPR>>16
579 mtcrf 0x80,r3 /* Restore CR0 */
584 * Handle TLB miss for DATA Store on 603/603e
590 * r1: linux style pte ( later becomes ppc hardware pte )
591 * r2: ptr to linux-style pte
594 /* Get PTE (linux-style) and check access */
596 lis r1, TASK_SIZE@h /* check if kernel address */
598 mfspr r2, SPRN_SPRG_PGDIR
599 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
601 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
602 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
/* Same two-level walk as the load-miss handler above; the access mask
   additionally requires RW and DIRTY for a store. */
603 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
604 lwz r2,0(r2) /* get pmd entry */
605 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
606 beq- DataAddressInvalid /* return if no mapping */
607 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
608 lwz r0,0(r2) /* get linux-style pte */
609 andc. r1,r1,r0 /* check access & ~permission */
610 bne- DataAddressInvalid /* return if access not permitted */
612 * NOTE! We are assuming this is not an SMP system, otherwise
613 * we would need to update the pte atomically with lwarx/stwcx.
615 /* Convert linux-style PTE to low word of PPC-style PTE */
616 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
617 li r1,0xe06 /* clear out reserved bits & PP msb */
618 andc r1,r0,r1 /* PP = user? 1: 0 */
620 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
621 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
623 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
/* Software TLB way-selection (LRU), as in the load-miss path */
625 BEGIN_MMU_FTR_SECTION
627 mfspr r1,SPRN_SPRG_603_LRU
628 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
632 mtspr SPRN_SPRG_603_LRU,r1
634 rlwimi r2,r0,31-14,14,14
636 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/* Fallback handler names when the corresponding feature is disabled */
640 #ifndef CONFIG_ALTIVEC
641 #define altivec_assist_exception unknown_exception
644 #ifndef CONFIG_TAU_INT
645 #define TAUException unknown_exception
/* Remaining vectors: mostly reserved/unused traps routed to
   unknown_exception; 0x1300/0x1400/0x1600/0x1700/0x2000 have
   dedicated handlers. */
648 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
649 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
650 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
651 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
652 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
653 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
654 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
655 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
656 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
657 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
658 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
659 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
660 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
661 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
662 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
663 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
664 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
665 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
666 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
667 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
668 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
669 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
670 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
671 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
672 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
673 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
674 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
675 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
676 EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
/* Out-of-line trampolines: the fixed-size vector slots branch here
   for the parts of the transfer that don't fit in the slot. */
681 EXC_XFER_STD(0x200, machine_check_exception)
683 alignment_exception_tramp:
684 EXC_XFER_STD(0x600, alignment_exception)
686 handle_page_fault_tramp_1:
687 #ifdef CONFIG_VMAP_STACK
688 EXCEPTION_PROLOG_2 handle_dar_dsisr=1
693 handle_page_fault_tramp_2:
694 EXC_XFER_LITE(0x300, handle_page_fault)
696 #ifdef CONFIG_VMAP_STACK
/* Spill/refill the volatile GPRs plus LR/CTR into the thread_struct
   (THRn/THLR/THCTR slots) around calls made from the TLB-miss/hash
   glue, where no exception frame exists yet. */
697 .macro save_regs_thread thread
698 stw r0, THR0(\thread)
699 stw r3, THR3(\thread)
700 stw r4, THR4(\thread)
701 stw r5, THR5(\thread)
702 stw r6, THR6(\thread)
703 stw r8, THR8(\thread)
704 stw r9, THR9(\thread)
706 stw r0, THLR(\thread)
708 stw r0, THCTR(\thread)
711 .macro restore_regs_thread thread
712 lwz r0, THLR(\thread)
714 lwz r0, THCTR(\thread)
716 lwz r0, THR0(\thread)
717 lwz r3, THR3(\thread)
718 lwz r4, THR4(\thread)
719 lwz r5, THR5(\thread)
720 lwz r6, THR6(\thread)
721 lwz r8, THR8(\thread)
722 lwz r9, THR9(\thread)
731 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
733 mfspr r10, SPRN_SPRG_THREAD
734 restore_regs_thread r10
735 b .Lhash_page_dsi_cont
739 mfspr r10, SPRN_SPRG_THREAD
745 mfspr r10, SPRN_SPRG_THREAD
746 restore_regs_thread r10
748 b .Lhash_page_isi_cont
/* Return point after hash_page inserted a PTE: restore state and
   retry the faulting access. */
750 .globl fast_hash_page_return
751 fast_hash_page_return:
752 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
753 mfspr r10, SPRN_SPRG_THREAD
754 restore_regs_thread r10
760 mfspr r10, SPRN_SPRG_SCRATCH0
765 mfspr r11, SPRN_SPRG_SCRATCH1
766 mfspr r10, SPRN_SPRG_SCRATCH0
770 vmap_stack_overflow_exception
/* AltiVec unavailable (relocated 0xf20): lazily load the vector state
   for user tasks; kernel-mode use is an error. */
775 #ifdef CONFIG_ALTIVEC
777 bl load_up_altivec /* if from user, just load it up */
778 b fast_exception_return
779 #endif /* CONFIG_ALTIVEC */
780 1: addi r3,r1,STACK_FRAME_OVERHEAD
781 EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
785 addi r3,r1,STACK_FRAME_OVERHEAD
786 EXC_XFER_STD(0xf00, performance_monitor_exception)
790 * This code is jumped to from the startup code to copy
791 * the kernel image to physical address PHYSICAL_START.
794 addis r9,r26,klimit@ha /* fetch klimit */
796 addis r25,r25,-KERNELBASE@h
797 lis r3,PHYSICAL_START@h /* Destination base address */
798 li r6,0 /* Destination offset */
799 li r5,0x4000 /* # bytes of memory to copy */
800 bl copy_and_flush /* copy the first 0x4000 bytes */
/* Jump into the already-copied first 16K at the destination, then
   copy the remainder from there (we may be overwriting ourselves). */
801 addi r0,r3,4f@l /* jump to the address of 4f */
802 mtctr r0 /* in copy and do the rest. */
803 bctr /* jump to the copy */
805 bl copy_and_flush /* copy the rest */
809 * Copy routine used to copy the kernel to start at physical address 0
810 * and flush and invalidate the caches as needed.
811 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
812 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
814 _ENTRY(copy_and_flush)
817 4: li r0,L1_CACHE_BYTES/4
819 3: addi r6,r6,4 /* copy a cache line */
823 dcbst r6,r3 /* write it to memory */
825 icbi r6,r3 /* flush the icache line */
828 sync /* additional sync needed on g4 */
/* Secondary-CPU entry points: per-platform stubs that record the CPU
   number in r24 and fall into the common __secondary_start. */
835 .globl __secondary_start_mpc86xx
836 __secondary_start_mpc86xx:
838 stw r3, __secondary_hold_acknowledge@l(0)
839 mr r24, r3 /* cpu # */
842 .globl __secondary_start_pmac_0
843 __secondary_start_pmac_0:
844 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
853 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
854 set to map the 0xf0000000 - 0xffffffff region */
856 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
860 .globl __secondary_start
862 /* Copy some CPU settings from CPU 0 */
863 bl __restore_cpu_setup
867 bl call_setup_cpu /* Call setup_cpu for this CPU */
871 /* get current's stack and current */
872 lis r2,secondary_current@ha
874 lwz r2,secondary_current@l(r2)
876 lwz r1,TASK_STACK(r1)
879 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
884 /* load up the MMU */
885 bl load_segment_registers
888 /* ptr to phys current thread */
890 addi r4,r4,THREAD /* phys address of our thread_struct */
891 mtspr SPRN_SPRG_THREAD,r4
892 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
893 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
894 mtspr SPRN_SPRG_PGDIR, r4
896 /* enable MMU and jump to start_secondary */
898 lis r3,start_secondary@h
899 ori r3,r3,start_secondary@l
903 #endif /* CONFIG_SMP */
905 #ifdef CONFIG_KVM_BOOK3S_HANDLER
906 #include "../kvm/book3s_rmhandlers.S"
910 * Load stuff into the MMU. Intended to be called with
914 sync /* Force all PTE updates to finish */
916 tlbia /* Clear all TLB entries */
917 sync /* wait for tlbia/tlbie to finish */
918 TLBSYNC /* ... on all CPUs */
919 /* Load the SDR1 register (hash table base & size) */
920 lis r6, early_hash - PAGE_OFFSET@h
921 ori r6, r6, 3 /* 256kB table */
924 addis r3, r3, Hash@ha
929 sync /* Force all PTE updates to finish */
931 tlbia /* Clear all TLB entries */
932 sync /* wait for tlbia/tlbie to finish */
933 TLBSYNC /* ... on all CPUs */
934 /* Load the SDR1 register (hash table base & size) */
940 /* Load the BAT registers with the values set up by MMU_init. */
948 BEGIN_MMU_FTR_SECTION
953 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/*
 * load_segment_registers: program all 16 segment registers for
 * context 0.  User segments (NUM_USER_SEGMENTS of them) get VSIDs
 * stepped by 0x111, with Nx/Ks set when KUEP/KUAP are enabled;
 * the remaining kernel segments get Kp=1 and Nx=Ks=0.
 */
956 _GLOBAL(load_segment_registers)
957 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
958 mtctr r0 /* for context 0 */
959 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
960 #ifdef CONFIG_PPC_KUEP
961 oris r3, r3, SR_NX@h /* Set Nx */
963 #ifdef CONFIG_PPC_KUAP
964 oris r3, r3, SR_KS@h /* Set Ks */
968 addi r3, r3, 0x111 /* increment VSID */
969 addis r4, r4, 0x1000 /* address of next segment */
971 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
972 mtctr r0 /* for context 0 */
973 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
974 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
975 oris r3, r3, SR_KP@h /* Kp = 1 */
977 addi r3, r3, 0x111 /* increment VSID */
978 addis r4, r4, 0x1000 /* address of next segment */
983 * This is where the main kernel code starts.
988 ori r2,r2,init_task@l
989 /* Set up for using our exception vectors */
990 /* ptr to phys current thread */
992 addi r4,r4,THREAD /* init task's THREAD */
993 mtspr SPRN_SPRG_THREAD,r4
994 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
995 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
996 mtspr SPRN_SPRG_PGDIR, r4
/* Point r1 at the top of init_thread_union, leaving room for an
   initial stack frame, and store a zero back-chain terminator. */
999 lis r1,init_thread_union@ha
1000 addi r1,r1,init_thread_union@l
1002 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1004 * Do early platform-specific initialization,
1005 * and set up the MMU.
1015 bl MMU_init_hw_patch
1018 * Go back to running unmapped so we can load up new values
1019 * for SDR1 (hash table pointer) and the segment registers
1020 * and change to using our exception vectors.
1025 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1031 /* Load up the kernel context */
1034 #ifdef CONFIG_BDI_SWITCH
1035 /* Add helper information for the Abatron bdiGDB debugger.
1036 * We do this here because we know the mmu is disabled, and
1037 * will be enabled for real in just a few instructions.
1039 lis r5, abatron_pteptrs@h
1040 ori r5, r5, abatron_pteptrs@l
1041 stw r5, 0xf0(0) /* This must match your Abatron config */
1042 lis r6, swapper_pg_dir@h
1043 ori r6, r6, swapper_pg_dir@l
1046 #endif /* CONFIG_BDI_SWITCH */
1048 /* Now turn on the MMU for real! */
1050 lis r3,start_kernel@h
1051 ori r3,r3,start_kernel@l
1057 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1059 * Set up the segment registers for a new context.
1061 _ENTRY(switch_mmu_context)
1062 lwz r3,MMCONTEXTID(r4)
/* Derive the base VSID from the context id (multiply by a skew so
   consecutive contexts hash to different PTE groups). */
1065 mulli r3,r3,897 /* multiply context by skew factor */
1066 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1067 #ifdef CONFIG_PPC_KUEP
1068 oris r3, r3, SR_NX@h /* Set Nx */
1070 #ifdef CONFIG_PPC_KUAP
1071 oris r3, r3, SR_KS@h /* Set Ks */
1073 li r0,NUM_USER_SEGMENTS
1077 #ifdef CONFIG_BDI_SWITCH
1078 /* Context switch the PTE pointer for the Abatron BDI2000.
1079 * The PGDIR is passed as second argument.
1081 lis r5, abatron_pteptrs@ha
1082 stw r4, abatron_pteptrs@l + 0x4(r5)
1085 mtspr SPRN_SPRG_PGDIR, r4
/* Write the user segment registers, stepping the VSID per segment */
1090 addi r3,r3,0x111 /* next VSID */
1091 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1092 addis r4,r4,0x1000 /* address of next segment */
1098 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1100 EXPORT_SYMBOL(switch_mmu_context)
1103 * An undocumented "feature" of 604e requires that the v bit
1104 * be cleared before changing BAT values.
1106 * Also, newer IBM firmware does not clear bat3 and 4 so
1107 * this makes sure it's done.
/* Zero every IBAT/DBAT pair (r10 presumably holds 0 here -- the li is
   not visible in this chunk; TODO confirm against full source). */
1113 mtspr SPRN_DBAT0U,r10
1114 mtspr SPRN_DBAT0L,r10
1115 mtspr SPRN_DBAT1U,r10
1116 mtspr SPRN_DBAT1L,r10
1117 mtspr SPRN_DBAT2U,r10
1118 mtspr SPRN_DBAT2L,r10
1119 mtspr SPRN_DBAT3U,r10
1120 mtspr SPRN_DBAT3L,r10
1121 mtspr SPRN_IBAT0U,r10
1122 mtspr SPRN_IBAT0L,r10
1123 mtspr SPRN_IBAT1U,r10
1124 mtspr SPRN_IBAT1L,r10
1125 mtspr SPRN_IBAT2U,r10
1126 mtspr SPRN_IBAT2L,r10
1127 mtspr SPRN_IBAT3U,r10
1128 mtspr SPRN_IBAT3L,r10
1129 BEGIN_MMU_FTR_SECTION
1130 /* Here's a tweak: at this point, CPU setup have
1131 * not been called yet, so HIGH_BAT_EN may not be
1132 * set in HID0 for the 745x processors. However, it
1133 * seems that doesn't affect our ability to actually
1134 * write to these SPRs.
1136 mtspr SPRN_DBAT4U,r10
1137 mtspr SPRN_DBAT4L,r10
1138 mtspr SPRN_DBAT5U,r10
1139 mtspr SPRN_DBAT5L,r10
1140 mtspr SPRN_DBAT6U,r10
1141 mtspr SPRN_DBAT6L,r10
1142 mtspr SPRN_DBAT7U,r10
1143 mtspr SPRN_DBAT7L,r10
1144 mtspr SPRN_IBAT4U,r10
1145 mtspr SPRN_IBAT4L,r10
1146 mtspr SPRN_IBAT5U,r10
1147 mtspr SPRN_IBAT5L,r10
1148 mtspr SPRN_IBAT6U,r10
1149 mtspr SPRN_IBAT6L,r10
1150 mtspr SPRN_IBAT7U,r10
1151 mtspr SPRN_IBAT7L,r10
1152 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Drop translation (and RI/EE) while reprogramming the BATs below */
1161 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1162 rlwinm r0, r6, 0, ~MSR_RI
1163 rlwinm r0, r0, 0, ~MSR_EE
1174 LOAD_BAT(0, r3, r4, r5)
1175 LOAD_BAT(1, r3, r4, r5)
1176 LOAD_BAT(2, r3, r4, r5)
1177 LOAD_BAT(3, r3, r4, r5)
1178 BEGIN_MMU_FTR_SECTION
1179 LOAD_BAT(4, r3, r4, r5)
1180 LOAD_BAT(5, r3, r4, r5)
1181 LOAD_BAT(6, r3, r4, r5)
1182 LOAD_BAT(7, r3, r4, r5)
1183 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1184 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1192 1: addic. r10, r10, -0x1000
1199 addi r4, r3, __after_mmu_off - _start
1201 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1211 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
1213 lis r11,PAGE_OFFSET@h
1216 ori r8,r8,0x12 /* R/W access, M=1 */
1218 ori r8,r8,2 /* R/W access */
1219 #endif /* CONFIG_SMP */
1220 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1222 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
1223 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1224 mtspr SPRN_IBAT0L,r8
1225 mtspr SPRN_IBAT0U,r11
1229 #ifdef CONFIG_BOOTX_TEXT
1232 * setup the display bat prepared for us in prom.c
1237 addis r8,r3,disp_BAT@ha
1238 addi r8,r8,disp_BAT@l
1243 mtspr SPRN_DBAT3L,r8
1244 mtspr SPRN_DBAT3U,r11
1246 #endif /* CONFIG_BOOTX_TEXT */
1248 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1252 mtspr SPRN_DBAT1L, r8
1255 ori r11, r11, (BL_1M << 2) | 2
1256 mtspr SPRN_DBAT1U, r11
1261 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1263 /* prepare a BAT for early io */
1264 #if defined(CONFIG_GAMECUBE)
1266 #elif defined(CONFIG_WII)
1269 #error Invalid platform for USB Gecko based early debugging.
1272 * The virtual address used must match the virtual address
1273 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1275 lis r11, 0xfffe /* top 128K */
1276 ori r8, r8, 0x002a /* uncached, guarded, rw */
1277 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1278 mtspr SPRN_DBAT1L, r8
1279 mtspr SPRN_DBAT1U, r11
1284 /* Jump into the system reset for the rom.
1285 * We first disable the MMU, and then jump to the ROM reset address.
1287 * r3 is the board info structure, r4 is the location for starting.
1288 * I use this for building a small kernel that can load other kernels,
1289 * rather than trying to write or rely on a rom monitor that can tftp load.
1294 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
/* Turn on the I/D caches via HID0 before jumping to the ROM */
1298 mfspr r11, SPRN_HID0
1300 ori r10,r10,HID0_ICE|HID0_DCE
1302 mtspr SPRN_HID0, r11
1304 li r5, MSR_ME|MSR_RI
1306 addis r6,r6,-KERNELBASE@h
1320 * We put a few things here that have to be page-aligned.
1321 * This stuff goes at the beginning of the data segment,
1322 * which is page-aligned.
1327 .globl empty_zero_page
1330 EXPORT_SYMBOL(empty_zero_page)
/* Kernel top-level page directory */
1332 .globl swapper_pg_dir
1334 .space PGD_TABLE_SIZE
1336 /* Room for two PTE pointers, usually the kernel and current user pointers
1337 * to their respective root page table.