1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
20 #include <linux/pgtable.h>
24 #include <asm/cputable.h>
25 #include <asm/cache.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/ptrace.h>
31 #include <asm/kvm_book3s_asm.h>
32 #include <asm/export.h>
33 #include <asm/feature-fixups.h>
34 #include <asm/interrupt.h>
/*
 * LOAD_BAT(n, reg, RA, RB): program the n-th pair of instruction and
 * data BAT registers from the BAT table pointed to by `reg` (16 bytes
 * per entry: IBATnU, IBATnL, DBATnU, DBATnL), using RA/RB as scratch.
 * The upper (U) registers are written first so the entry is in a
 * known state before the low word is loaded.
 * NOTE(review): this excerpt appears to be missing a line before the
 * first mtspr (the upstream macro zeroes RA first so the initial
 * IBAT/DBAT upper writes invalidate the entry) -- confirm against the
 * full file.
 */
38 #define LOAD_BAT(n, reg, RA, RB) \
39 /* see the comment for clear_bats() -- Cort */ \
41 mtspr SPRN_IBAT##n##U,RA; \
42 mtspr SPRN_DBAT##n##U,RA; \
43 lwz RA,(n*16)+0(reg); \
44 lwz RB,(n*16)+4(reg); \
45 mtspr SPRN_IBAT##n##U,RA; \
46 mtspr SPRN_IBAT##n##L,RB; \
47 lwz RA,(n*16)+8(reg); \
48 lwz RB,(n*16)+12(reg); \
49 mtspr SPRN_DBAT##n##U,RA; \
50 mtspr SPRN_DBAT##n##L,RB
/*
 * Kernel entry point area.  Documents the three possible entry
 * environments (OpenFirmware, BootX on MacOS, PReP bootloader) and the
 * register contract each one provides.  Several interleaved
 * instructions are not visible in this excerpt; only what is shown
 * below is documented.
 */
56 * _start is defined this way because the XCOFF loader in the OpenFirmware
57 * on the powermac expects the entry point to be a procedure descriptor.
61 * These are here for legacy reasons, the kernel used to
62 * need to look like a coff function entry for the pmac
63 * but we're always started by some kind of bootloader now.
66 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
67 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
71 * Enter here with the kernel text, data and bss loaded starting at
72 * 0, running with virtual == physical mapping.
73 * r5 points to the prom entry point (the client interface handler
74 * address). Address translation is turned on, with the prom
75 * managing the hash table. Interrupts are disabled. The stack
76 * pointer (r1) points to just below the end of the half-meg region
77 * from 0x380000 - 0x400000, which is mapped in already.
79 * If we are booted from MacOS via BootX, we enter with the kernel
80 * image loaded somewhere, and the following values in registers:
81 * r3: 'BooX' (0x426f6f58)
82 * r4: virtual address of boot_infos_t
86 * This is jumped to on prep systems right after the kernel is relocated
87 * to its proper place in memory by the boot loader. The expected layout
89 * r3: ptr to residual data
90 * r4: initrd_start or if no initrd then 0
91 * r5: initrd_end - unused if r4 is 0
92 * r6: Start of command line string
93 * r7: End of command line string
95 * This just gets a minimal mmu environment setup so we can call
96 * start_here() to do the real work.
103 * We have to do any OF calls before we map ourselves to KERNELBASE,
104 * because OF may have I/O devices mapped into that area
105 * (particularly on CHRP).
110 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
111 /* find out where we are now */
/* Classic PIC trick: a bl to the next instruction puts the runtime
 * address of label 0 into LR, from which the runtime base of _stext
 * is computed (bl itself not visible in this excerpt). */
113 0: mflr r8 /* r8 = runtime addr here */
114 addis r8,r8,(_stext - 0b)@ha
115 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
117 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
119 /* We never return. We also hit that trap if trying to boot
120 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
124 * Check for BootX signature when supporting PowerMac and branch to
125 * appropriate trampoline if it's present
127 #ifdef CONFIG_PPC_PMAC
134 #endif /* CONFIG_PPC_PMAC */
/* r31 carries the device tree pointer through the early MMU setup. */
136 1: mr r31,r3 /* save device tree ptr */
/*
 * Early boot: identify the machine, clear BATs/TLB, set up the initial
 * BAT mapping and early-debug BATs, run CPU setup, then relocate the
 * kernel to PHYSICAL_START if needed and turn the MMU on via rfi.
 */
140 * early_init() does the early machine identification and does
141 * the necessary low-level setup and clears the BSS
142 * -- Cort <cort@fsmlabs.com>
146 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
147 * the physical address we are running at, returned by early_init()
155 bl load_segment_registers
158 #if defined(CONFIG_BOOTX_TEXT)
161 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
164 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
165 bl setup_usbgecko_bat
169 * Call setup_cpu for CPU 0 and initialize 6xx Idle
173 bl call_setup_cpu /* Call setup_cpu for this CPU */
179 * We need to run with _start at physical address 0.
180 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
181 * the exception vectors at 0 (and therefore this copy
182 * overwrites OF's exception vectors with our own).
183 * The MMU is off at this point.
187 addis r4,r3,KERNELBASE@h /* current address of _start */
188 lis r5,PHYSICAL_START@h
189 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
192 * we now have the 1st 16M of ram mapped with the bats.
193 * prep needs the mmu to be turned on here, but pmac already has it on.
194 * this shouldn't bother the pmac since it just gets turned on again
195 * as we jump to our code at KERNELBASE. -- Cort
196 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
197 * off, and in other cases, we now turn it off before changing BATs above.
/* Build the target MSR (translation on, recoverable) and target PC,
 * then rfi to atomically set MSR and jump (SRR0/SRR1 loads not visible
 * in this excerpt). */
201 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
204 ori r0,r0,start_here@l
206 rfi /* enables MMU */
/*
 * Holding pen for secondary CPUs, placed at the fixed address 0xc0
 * expected by the PReP bootloader.  Each secondary acknowledges its
 * presence in __secondary_hold_acknowledge, then spins until the boot
 * CPU releases it with its CPU number.
 */
209 * We need __secondary_hold as a place to hold the other cpus on
210 * an SMP machine, even when we are running a UP kernel.
212 . = 0xc0 /* for prep bootloader */
213 li r3,1 /* MTX only has 1 cpu */
214 .globl __secondary_hold
216 /* tell the master we're here */
217 stw r3,__secondary_hold_acknowledge@l(0)
220 /* wait until we're told to start */
223 /* our cpu # was at addr 0 - go */
224 mr r24,r3 /* cpu # */
228 #endif /* CONFIG_SMP */
230 .globl __secondary_hold_spinloop
231 __secondary_hold_spinloop:
233 .globl __secondary_hold_acknowledge
234 __secondary_hold_acknowledge:
238 /* core99 pmac starts the secondary here by changing the vector, and
239 putting it back to what it was (unknown_async_exception) when done. */
240 EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
/*
 * Machine check (0x200).  On CHRP, a machine check may arrive while
 * inside RTAS, where GPRs/BATs cannot be trusted; thread.rtas_sp is
 * used as the in-RTAS marker and alternate exception frame, as the
 * comment below explains.
 */
244 * On CHRP, this is complicated by the fact that we could get a
245 * machine check inside RTAS, and we have no guarantee that certain
246 * critical registers will have the values we expect. The set of
247 * registers that might have bad values includes all the GPRs
248 * and all the BATs. We indicate that we are in RTAS by putting
249 * a non-zero value, the address of the exception frame to use,
250 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
251 * and uses its value if it is non-zero.
252 * (Other exception handlers assume that r1 is a valid kernel stack
253 * pointer when we take an exception from supervisor mode.)
256 START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
258 #ifdef CONFIG_PPC_CHRP
/* Stash r1 and peek at the thread struct to test rtas_sp (the test
 * itself is not visible in this excerpt). */
259 mtspr SPRN_SPRG_SCRATCH2,r1
260 mfspr r1, SPRN_SPRG_THREAD
264 mfspr r1, SPRN_SPRG_SCRATCH2
265 #endif /* CONFIG_PPC_CHRP */
267 7: EXCEPTION_PROLOG_2 0x200 MachineCheck
268 #ifdef CONFIG_PPC_CHRP
272 1: prepare_transfer_to_handler
273 bl machine_check_exception
/*
 * Data storage interrupt (0x300).  On 604-class (hash MMU) parts the
 * fast path tries hash_page first; faults that are not simple hash
 * misses (bad-fault or DABR-match DSISR bits) fall through to the full
 * C handler.
 */
276 /* Data access exception. */
277 START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
278 #ifdef CONFIG_PPC_BOOK3S_604
279 BEGIN_MMU_FTR_SECTION
280 mtspr SPRN_SPRG_SCRATCH2,r10
281 mfspr r10, SPRN_SPRG_THREAD
283 mfspr r10, SPRN_DSISR
/* Only DSISR bits that the C fault path must see route us away from
 * the fast hash path. */
285 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
286 mfspr r10, SPRN_SPRG_THREAD
288 .Lhash_page_dsi_cont:
291 mfspr r10, SPRN_SPRG_SCRATCH2
294 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
296 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
298 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
299 prepare_transfer_to_handler
/* r5 holds the saved DSISR here; DABR match selects the breakpoint
 * path rather than the page-fault path. */
301 andis. r0, r5, DSISR_DABRMATCH@h
/*
 * Instruction storage interrupt (0x400).  As with DataAccess, hash-MMU
 * CPUs attempt a fast hash_page fill when SRR1 indicates "no PTE
 * found"; otherwise the fault is handed to the C handler with the
 * relevant SRR1 bits filtered into r5.
 */
310 /* Instruction access exception. */
311 START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
312 mtspr SPRN_SPRG_SCRATCH0,r10
313 mtspr SPRN_SPRG_SCRATCH1,r11
314 mfspr r10, SPRN_SPRG_THREAD
317 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
320 #ifdef CONFIG_PPC_BOOK3S_604
321 BEGIN_MMU_FTR_SECTION
322 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
324 .Lhash_page_isi_cont:
325 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
326 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
328 andi. r11, r11, MSR_PR
331 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
332 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
335 prepare_transfer_to_handler
/*
 * Standard exception vectors 0x500-0xf20: external interrupt,
 * alignment, program check, FP unavailable, decrementer, system call,
 * trace, and the relocated performance-monitor / AltiVec-unavailable
 * traps.  EXCEPTION() generates a full vector; START_EXCEPTION() opens
 * one whose body follows inline.
 */
339 /* External interrupt */
340 EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
342 /* Alignment exception */
343 START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
344 EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
345 prepare_transfer_to_handler
346 bl alignment_exception
350 /* Program check exception */
351 START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
352 EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
353 prepare_transfer_to_handler
354 bl program_check_exception
358 /* Floating-point unavailable */
359 START_EXCEPTION(0x800, FPUnavailable)
360 #ifdef CONFIG_PPC_FPU
363 * Certain Freescale cores don't have a FPU and treat fp instructions
364 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
367 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
368 EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
/* User-mode faults lazily load the FP state; kernel-mode use of FP is
 * an error reported via the C handler below. */
370 bl load_up_fpu /* if from user, just load it up */
371 b fast_exception_return
372 1: prepare_transfer_to_handler
373 bl kernel_fp_unavailable_exception
380 EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
382 EXCEPTION(0xa00, Trap_0a, unknown_exception)
383 EXCEPTION(0xb00, Trap_0b, unknown_exception)
386 START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
387 SYSCALL_ENTRY INTERRUPT_SYSCALL
389 EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
390 EXCEPTION(0xe00, Trap_0e, unknown_exception)
393 * The Altivec unavailable trap is at 0x0f20. Foo.
394 * We effectively remap it to 0x3000.
395 * We include an altivec unavailable exception vector even if
396 * not configured for Altivec, so that you can't panic a
397 * non-altivec kernel running on a machine with altivec just
398 * by executing an altivec instruction.
400 START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
403 START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
/*
 * 603/603e software instruction TLB miss handler (vector 0x1000).
 * The 603 has no hardware page-table walk: on a miss the CPU switches
 * to the alternate GPR set (TGPR) and vectors here.  The handler walks
 * the two-level Linux page table for the address in IMISS, converts
 * the Linux PTE to a hardware PTE, and reloads the TLB; a missing or
 * non-executable mapping is converted into a synthetic ISI via
 * InstructionAddressInvalid.
 */
408 * Handle TLB miss for instruction on 603/603e.
409 * Note: we get an alternate set of r0 - r3 to use automatically.
411 . = INTERRUPT_INST_TLB_MISS_603
415 * r1: linux style pte ( later becomes ppc hardware pte )
416 * r2: ptr to linux-style pte
419 /* Get PTE (linux-style) and check access */
421 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
422 lis r1, TASK_SIZE@h /* check if kernel address */
/* r1 = required permission mask; user bit dropped below for kernel
 * addresses (executable kernel text is not user-accessible). */
426 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
427 rlwinm r2, r2, 28, 0xfffff000
428 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
430 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
431 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
432 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
434 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
435 lwz r2,0(r2) /* get pmd entry */
436 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
437 beq- InstructionAddressInvalid /* return if no mapping */
438 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
439 lwz r0,0(r2) /* get linux-style pte */
440 andc. r1,r1,r0 /* check access & ~permission */
441 bne- InstructionAddressInvalid /* return if access not permitted */
442 /* Convert linux-style PTE to low word of PPC-style PTE */
443 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
444 ori r1, r1, 0xe06 /* clear out reserved bits */
445 andc r1, r0, r1 /* PP = user? 1 : 0 */
447 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
448 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
451 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
/* Fault path: fabricate SRR1/DSISR as a real ISI would, leave TGPR
 * mode, and fall into the normal instruction-access exception. */
454 InstructionAddressInvalid:
456 rlwinm r1,r3,9,6,6 /* Get load/store bit */
459 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
460 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
463 mfspr r1,SPRN_IMISS /* Get failing address */
464 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
465 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
467 mtspr SPRN_DAR,r1 /* Set fault address */
468 mfmsr r0 /* Restore "normal" registers */
469 xoris r0,r0,MSR_TGPR>>16
470 mtcrf 0x80,r3 /* Restore CR0 */
/*
 * 603/603e software data-load TLB miss handler (vector 0x1100).
 * Same page-table walk as the instruction miss handler, but the
 * permission check requires only read access, and the resulting
 * hardware PTE encodes PP from _PAGE_RW/_PAGE_USER.  An optional
 * software-LRU section manages DTLB way selection on CPUs that
 * need it (MMU_FTR_NEED_DTLB_SW_LRU).
 */
475 * Handle TLB miss for DATA Load operation on 603/603e
477 . = INTERRUPT_DATA_LOAD_TLB_MISS_603
481 * r1: linux style pte ( later becomes ppc hardware pte )
482 * r2: ptr to linux-style pte
485 /* Get PTE (linux-style) and check access */
487 lis r1, TASK_SIZE@h /* check if kernel address */
490 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
491 rlwinm r2, r2, 28, 0xfffff000
493 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
494 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
495 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
496 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
497 lwz r2,0(r2) /* get pmd entry */
498 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
499 beq- DataAddressInvalid /* return if no mapping */
500 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
501 lwz r0,0(r2) /* get linux-style pte */
502 andc. r1,r1,r0 /* check access & ~permission */
503 bne- DataAddressInvalid /* return if access not permitted */
504 /* Convert linux-style PTE to low word of PPC-style PTE */
505 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
506 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
507 rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
508 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
509 xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
510 ori r1,r1,0xe04 /* clear out reserved bits */
511 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
513 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
514 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
516 BEGIN_MMU_FTR_SECTION
/* Software LRU: track which DTLB way to replace in SPRG_603_LRU,
 * indexed by address bits 15:19 (intervening lines not visible in
 * this excerpt). */
518 mfspr r1,SPRN_SPRG_603_LRU
519 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
523 mtspr SPRN_SPRG_603_LRU,r1
525 rlwimi r2,r0,31-14,14,14
531 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
535 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/* Fault path: synthesize a DSI from the miss information. */
538 rlwinm r1,r3,9,6,6 /* Get load/store bit */
541 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
543 mfspr r1,SPRN_DMISS /* Get failing address */
544 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
545 beq 20f /* Jump if big endian */
547 20: mtspr SPRN_DAR,r1 /* Set fault address */
548 mfmsr r0 /* Restore "normal" registers */
549 xoris r0,r0,MSR_TGPR>>16
550 mtcrf 0x80,r3 /* Restore CR0 */
/*
 * 603/603e software data-store TLB miss handler (vector 0x1200).
 * Identical walk to the load-miss handler but the permission mask also
 * demands _PAGE_RW and _PAGE_DIRTY, so a store to a clean or read-only
 * page takes the DataAddressInvalid path (becoming a DSI that lets the
 * C fault handler set the dirty bit or signal the fault).
 */
555 * Handle TLB miss for DATA Store on 603/603e
557 . = INTERRUPT_DATA_STORE_TLB_MISS_603
561 * r1: linux style pte ( later becomes ppc hardware pte )
562 * r2: ptr to linux-style pte
565 /* Get PTE (linux-style) and check access */
567 lis r1, TASK_SIZE@h /* check if kernel address */
570 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
571 rlwinm r2, r2, 28, 0xfffff000
573 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
574 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
575 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
576 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
577 lwz r2,0(r2) /* get pmd entry */
578 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
579 beq- DataAddressInvalid /* return if no mapping */
580 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
581 lwz r0,0(r2) /* get linux-style pte */
582 andc. r1,r1,r0 /* check access & ~permission */
583 bne- DataAddressInvalid /* return if access not permitted */
584 /* Convert linux-style PTE to low word of PPC-style PTE */
585 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
586 li r1,0xe06 /* clear out reserved bits & PP msb */
587 andc r1,r0,r1 /* PP = user? 1: 0 */
589 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
590 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
592 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
594 BEGIN_MMU_FTR_SECTION
/* Software DTLB-way LRU, as in the load-miss handler. */
596 mfspr r1,SPRN_SPRG_603_LRU
597 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
601 mtspr SPRN_SPRG_603_LRU,r1
603 rlwimi r2,r0,31-14,14,14
609 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
613 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/*
 * Remaining fixed vectors 0x1300-0x2f00.  Most are unused on these
 * CPUs and route to unknown_exception; 0x1300 is the instruction
 * breakpoint, 0x1400 SMI, 0x1600 AltiVec assist, 0x1700 thermal (TAU),
 * 0x2000 run-mode.  The #defines below substitute the generic handler
 * when the specific one is not configured.
 * NOTE(review): the matching #endif lines for these two #ifndef blocks
 * are not visible in this excerpt -- confirm against the full file.
 */
615 #ifndef CONFIG_ALTIVEC
616 #define altivec_assist_exception unknown_exception
619 #ifndef CONFIG_TAU_INT
620 #define TAUException unknown_async_exception
623 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
624 EXCEPTION(0x1400, SMI, SMIException)
625 EXCEPTION(0x1500, Trap_15, unknown_exception)
626 EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
627 EXCEPTION(0x1700, Trap_17, TAUException)
628 EXCEPTION(0x1800, Trap_18, unknown_exception)
629 EXCEPTION(0x1900, Trap_19, unknown_exception)
630 EXCEPTION(0x1a00, Trap_1a, unknown_exception)
631 EXCEPTION(0x1b00, Trap_1b, unknown_exception)
632 EXCEPTION(0x1c00, Trap_1c, unknown_exception)
633 EXCEPTION(0x1d00, Trap_1d, unknown_exception)
634 EXCEPTION(0x1e00, Trap_1e, unknown_exception)
635 EXCEPTION(0x1f00, Trap_1f, unknown_exception)
636 EXCEPTION(0x2000, RunMode, RunModeException)
637 EXCEPTION(0x2100, Trap_21, unknown_exception)
638 EXCEPTION(0x2200, Trap_22, unknown_exception)
639 EXCEPTION(0x2300, Trap_23, unknown_exception)
640 EXCEPTION(0x2400, Trap_24, unknown_exception)
641 EXCEPTION(0x2500, Trap_25, unknown_exception)
642 EXCEPTION(0x2600, Trap_26, unknown_exception)
643 EXCEPTION(0x2700, Trap_27, unknown_exception)
644 EXCEPTION(0x2800, Trap_28, unknown_exception)
645 EXCEPTION(0x2900, Trap_29, unknown_exception)
646 EXCEPTION(0x2a00, Trap_2a, unknown_exception)
647 EXCEPTION(0x2b00, Trap_2b, unknown_exception)
648 EXCEPTION(0x2c00, Trap_2c, unknown_exception)
649 EXCEPTION(0x2d00, Trap_2d, unknown_exception)
650 EXCEPTION(0x2e00, Trap_2e, unknown_exception)
651 EXCEPTION(0x2f00, Trap_2f, unknown_exception)
/*
 * 604 hash-MMU support: register save/restore macros for calling
 * hash_page from the DSI/ISI fast paths, plus the trampolines that
 * return into those handlers.  The macros spill/reload the volatile
 * GPRs (and LR/CTR via r0) into per-thread THR*/THLR/THCTR slots so
 * hash_page can run without a kernel stack.
 * NOTE(review): the mflr/mtlr/mfctr/mtctr and .endm lines are not
 * visible in this excerpt.
 */
656 #ifdef CONFIG_PPC_BOOK3S_604
657 .macro save_regs_thread thread
658 stw r0, THR0(\thread)
659 stw r3, THR3(\thread)
660 stw r4, THR4(\thread)
661 stw r5, THR5(\thread)
662 stw r6, THR6(\thread)
663 stw r8, THR8(\thread)
664 stw r9, THR9(\thread)
665 stw r0, THLR(\thread)
668 stw r0, THCTR(\thread)
671 .macro restore_regs_thread thread
672 lwz r0, THLR(\thread)
674 lwz r0, THCTR(\thread)
676 lwz r0, THR0(\thread)
677 lwz r3, THR3(\thread)
678 lwz r4, THR4(\thread)
679 lwz r5, THR5(\thread)
680 lwz r6, THR6(\thread)
681 lwz r8, THR8(\thread)
682 lwz r9, THR9(\thread)
/* DSI fast path: translate the store bit of DSISR into _PAGE_RW for
 * hash_page's access argument, then resume the DataAccess handler. */
691 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
693 mfspr r10, SPRN_SPRG_THREAD
694 restore_regs_thread r10
695 b .Lhash_page_dsi_cont
699 mfspr r10, SPRN_SPRG_THREAD
705 mfspr r10, SPRN_SPRG_THREAD
706 restore_regs_thread r10
708 b .Lhash_page_isi_cont
710 .globl fast_hash_page_return
711 fast_hash_page_return:
/* hash_page succeeded: restore scratch state and rfi back to the
 * faulting instruction; ISI vs DSI selects which SPRGs to unwind. */
712 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
713 mfspr r10, SPRN_SPRG_THREAD
714 restore_regs_thread r10
720 mfspr r10, SPRN_SPRG_SCRATCH2
725 mfspr r11, SPRN_SPRG_SCRATCH1
726 mfspr r10, SPRN_SPRG_SCRATCH0
728 #endif /* CONFIG_PPC_BOOK3S_604 */
/*
 * Out-of-line continuations: vmap-stack overflow, the relocated
 * AltiVec-unavailable handler (lazy vector-state load for user mode,
 * error for kernel mode), and the performance-monitor handler.
 */
730 #ifdef CONFIG_VMAP_STACK
731 vmap_stack_overflow_exception
736 EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
737 #ifdef CONFIG_ALTIVEC
739 bl load_up_altivec /* if from user, just load it up */
740 b fast_exception_return
741 #endif /* CONFIG_ALTIVEC */
742 1: prepare_transfer_to_handler
743 bl altivec_unavailable_exception
748 EXCEPTION_PROLOG 0xf00 PerformanceMonitor
749 prepare_transfer_to_handler
750 bl performance_monitor_exception
/*
 * Kernel relocation: copy the image down to PHYSICAL_START.  The first
 * 0x4000 bytes (which include this code) are copied first, then
 * execution jumps into the copy (label 4) to transfer the remainder,
 * so the copier never overwrites itself mid-copy.
 */
756 * This code is jumped to from the startup code to copy
757 * the kernel image to physical address PHYSICAL_START.
760 lis r3,PHYSICAL_START@h /* Destination base address */
761 li r6,0 /* Destination offset */
762 li r5,0x4000 /* # bytes of memory to copy */
763 bl copy_and_flush /* copy the first 0x4000 bytes */
764 addi r0,r3,4f@l /* jump to the address of 4f */
765 mtctr r0 /* in copy and do the rest. */
766 bctr /* jump to the copy */
767 4: lis r5,_end-KERNELBASE@h
768 ori r5,r5,_end-KERNELBASE@l
769 bl copy_and_flush /* copy the rest */
773 * Copy routine used to copy the kernel to start at physical address 0
774 * and flush and invalidate the caches as needed.
775 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
776 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
778 _GLOBAL(copy_and_flush)
781 4: li r0,L1_CACHE_BYTES/4
783 3: addi r6,r6,4 /* copy a cache line */
/* After copying each line, push it to memory and invalidate the
 * matching icache line so the copied code is fetchable. */
787 dcbst r6,r3 /* write it to memory */
789 icbi r6,r3 /* flush the icache line */
792 sync /* additional sync needed on g4 */
/*
 * Secondary CPU bring-up: platform-specific entry stubs (mpc86xx,
 * PowerMac core99/powersurge) converge on __secondary_start, which
 * copies CPU setup from CPU 0, locates this CPU's task/stack via
 * secondary_current, programs SPRG_THREAD (and SDR1 on non-hash
 * CPUs), then rfi's into start_secondary with the MMU on.
 */
799 .globl __secondary_start_mpc86xx
800 __secondary_start_mpc86xx:
802 stw r3, __secondary_hold_acknowledge@l(0)
803 mr r24, r3 /* cpu # */
806 .globl __secondary_start_pmac_0
807 __secondary_start_pmac_0:
808 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
817 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
818 set to map the 0xf0000000 - 0xffffffff region */
820 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
824 .globl __secondary_start
826 /* Copy some CPU settings from CPU 0 */
827 bl __restore_cpu_setup
831 bl call_setup_cpu /* Call setup_cpu for this CPU */
835 /* get current's stack and current */
836 lis r2,secondary_current@ha
838 lwz r2,secondary_current@l(r2)
840 lwz r1,TASK_STACK(r1)
843 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
848 /* load up the MMU */
849 bl load_segment_registers
852 /* ptr to phys current thread */
854 addi r4,r4,THREAD /* phys address of our thread_struct */
855 mtspr SPRN_SPRG_THREAD,r4
856 BEGIN_MMU_FTR_SECTION
/* Non-hash CPUs: point SDR1-style base at swapper_pg_dir (physical),
 * rotated into the format the hardware expects. */
857 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
858 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
859 rlwinm r4, r4, 4, 0xffff01ff
861 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
863 /* enable MMU and jump to start_secondary */
865 lis r3,start_secondary@h
866 ori r3,r3,start_secondary@l
870 #endif /* CONFIG_SMP */
/*
 * MMU loading helpers.  load_up_mmu flushes the TLB, programs SDR1 on
 * hash-MMU CPUs and reloads the BATs set up by MMU_init.
 * load_segment_registers programs all 16 segment registers for
 * context 0: user segments (optionally execute-protected via SR_NX
 * when KUEP is enabled) then kernel segments with Kp=1.
 */
872 #ifdef CONFIG_KVM_BOOK3S_HANDLER
873 #include "../kvm/book3s_rmhandlers.S"
877 * Load stuff into the MMU. Intended to be called with
881 sync /* Force all PTE updates to finish */
883 tlbia /* Clear all TLB entries */
884 sync /* wait for tlbia/tlbie to finish */
885 TLBSYNC /* ... on all CPUs */
886 /* Load the SDR1 register (hash table base & size) */
887 lis r6, early_hash - PAGE_OFFSET@h
888 ori r6, r6, 3 /* 256kB table */
893 sync /* Force all PTE updates to finish */
895 tlbia /* Clear all TLB entries */
896 sync /* wait for tlbia/tlbie to finish */
897 TLBSYNC /* ... on all CPUs */
898 BEGIN_MMU_FTR_SECTION
899 /* Load the SDR1 register (hash table base & size) */
904 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
906 /* Load the BAT registers with the values set up by MMU_init. */
914 BEGIN_MMU_FTR_SECTION
919 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
922 _GLOBAL(load_segment_registers)
923 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
924 mtctr r0 /* for context 0 */
925 #ifdef CONFIG_PPC_KUEP
926 lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
928 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
/* Loop body (mtsrin not visible in this excerpt): each iteration
 * bumps the VSID and steps r4 to the next 256MB segment. */
932 addi r3, r3, 0x111 /* increment VSID */
933 addis r4, r4, 0x1000 /* address of next segment */
935 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
936 mtctr r0 /* for context 0 */
937 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
938 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
939 oris r3, r3, SR_KP@h /* Kp = 1 */
941 addi r3, r3, 0x111 /* increment VSID */
942 addis r4, r4, 0x1000 /* address of next segment */
/*
 * start_here: first code run with the MMU on at KERNELBASE.  Sets up
 * the init task pointer, SPRG_THREAD, the init stack, calls the early
 * platform/MMU init, then drops translation to reload SDR1/segments
 * and finally rfi's into start_kernel.
 */
947 * This is where the main kernel code starts.
952 ori r2,r2,init_task@l
953 /* Set up for using our exception vectors */
954 /* ptr to phys current thread */
956 addi r4,r4,THREAD /* init task's THREAD */
957 mtspr SPRN_SPRG_THREAD,r4
958 BEGIN_MMU_FTR_SECTION
959 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
960 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
961 rlwinm r4, r4, 4, 0xffff01ff
963 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
966 lis r1,init_thread_union@ha
967 addi r1,r1,init_thread_union@l
/* Terminate the initial stack frame at the top of the init stack. */
969 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
971 * Do early platform-specific initialization,
972 * and set up the MMU.
985 * Go back to running unmapped so we can load up new values
986 * for SDR1 (hash table pointer) and the segment registers
987 * and change to using our exception vectors.
992 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
998 /* Load up the kernel context */
1001 #ifdef CONFIG_BDI_SWITCH
1002 /* Add helper information for the Abatron bdiGDB debugger.
1003 * We do this here because we know the mmu is disabled, and
1004 * will be enabled for real in just a few instructions.
1006 lis r5, abatron_pteptrs@h
1007 ori r5, r5, abatron_pteptrs@l
1008 stw r5, 0xf0(0) /* This must match your Abatron config */
1009 lis r6, swapper_pg_dir@h
1010 ori r6, r6, swapper_pg_dir@l
1013 #endif /* CONFIG_BDI_SWITCH */
1015 /* Now turn on the MMU for real! */
1017 lis r3,start_kernel@h
1018 ori r3,r3,start_kernel@l
/*
 * clear_bats: zero all I/D BAT registers (r10 presumably holds 0,
 * loaded on a line not visible in this excerpt -- confirm).  The 604e
 * quirk and the stale-firmware-BAT issue below are why every BAT is
 * cleared rather than only the ones the kernel uses.
 */
1024 * An undocumented "feature" of 604e requires that the v bit
1025 * be cleared before changing BAT values.
1027 * Also, newer IBM firmware does not clear bat3 and 4 so
1028 * this makes sure it's done.
1034 mtspr SPRN_DBAT0U,r10
1035 mtspr SPRN_DBAT0L,r10
1036 mtspr SPRN_DBAT1U,r10
1037 mtspr SPRN_DBAT1L,r10
1038 mtspr SPRN_DBAT2U,r10
1039 mtspr SPRN_DBAT2L,r10
1040 mtspr SPRN_DBAT3U,r10
1041 mtspr SPRN_DBAT3L,r10
1042 mtspr SPRN_IBAT0U,r10
1043 mtspr SPRN_IBAT0L,r10
1044 mtspr SPRN_IBAT1U,r10
1045 mtspr SPRN_IBAT1L,r10
1046 mtspr SPRN_IBAT2U,r10
1047 mtspr SPRN_IBAT2L,r10
1048 mtspr SPRN_IBAT3U,r10
1049 mtspr SPRN_IBAT3L,r10
1050 BEGIN_MMU_FTR_SECTION
1051 /* Here's a tweak: at this point, CPU setup have
1052 * not been called yet, so HIGH_BAT_EN may not be
1053 * set in HID0 for the 745x processors. However, it
1054 * seems that doesn't affect our ability to actually
1055 * write to these SPRs.
1057 mtspr SPRN_DBAT4U,r10
1058 mtspr SPRN_DBAT4L,r10
1059 mtspr SPRN_DBAT5U,r10
1060 mtspr SPRN_DBAT5L,r10
1061 mtspr SPRN_DBAT6U,r10
1062 mtspr SPRN_DBAT6L,r10
1063 mtspr SPRN_DBAT7U,r10
1064 mtspr SPRN_DBAT7L,r10
1065 mtspr SPRN_IBAT4U,r10
1066 mtspr SPRN_IBAT4L,r10
1067 mtspr SPRN_IBAT5U,r10
1068 mtspr SPRN_IBAT5L,r10
1069 mtspr SPRN_IBAT6U,r10
1070 mtspr SPRN_IBAT6L,r10
1071 mtspr SPRN_IBAT7U,r10
1072 mtspr SPRN_IBAT7L,r10
1073 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/*
 * update_bats: reload all BAT pairs from the in-memory BAT table while
 * running with translation (and RI) disabled, so the mapping change is
 * atomic with respect to exceptions.  r3 points at the BAT table for
 * the LOAD_BAT invocations (load of r3 not visible in this excerpt).
 */
1076 _GLOBAL(update_bats)
1082 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1083 rlwinm r0, r6, 0, ~MSR_RI
1084 rlwinm r0, r0, 0, ~MSR_EE
1095 LOAD_BAT(0, r3, r4, r5)
1096 LOAD_BAT(1, r3, r4, r5)
1097 LOAD_BAT(2, r3, r4, r5)
1098 LOAD_BAT(3, r3, r4, r5)
1099 BEGIN_MMU_FTR_SECTION
1100 LOAD_BAT(4, r3, r4, r5)
1101 LOAD_BAT(5, r3, r4, r5)
1102 LOAD_BAT(6, r3, r4, r5)
1103 LOAD_BAT(7, r3, r4, r5)
1104 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1105 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
/*
 * Final helpers: TLB flush loop, mmu_off return path, initial_bats
 * (map the first 256M of RAM at PAGE_OFFSET with BAT0, cached and --
 * on SMP -- coherent), plus optional early-debug BAT setups for the
 * BootX display, CPM, and USB Gecko consoles.
 */
1113 1: addic. r10, r10, -0x1000
/* mmu_off: compute the physical return address past the rfi. */
1120 addi r4, r3, __after_mmu_off - _start
1122 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1132 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
1134 lis r11,PAGE_OFFSET@h
1137 ori r8,r8,0x12 /* R/W access, M=1 */
1139 ori r8,r8,2 /* R/W access */
1140 #endif /* CONFIG_SMP */
1141 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1143 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
1144 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1145 mtspr SPRN_IBAT0L,r8
1146 mtspr SPRN_IBAT0U,r11
1150 #ifdef CONFIG_BOOTX_TEXT
1153 * setup the display bat prepared for us in prom.c
1158 addis r8,r3,disp_BAT@ha
1159 addi r8,r8,disp_BAT@l
1164 mtspr SPRN_DBAT3L,r8
1165 mtspr SPRN_DBAT3U,r11
1167 #endif /* CONFIG_BOOTX_TEXT */
1169 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1173 mtspr SPRN_DBAT1L, r8
1176 ori r11, r11, (BL_1M << 2) | 2
1177 mtspr SPRN_DBAT1U, r11
1182 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1184 /* prepare a BAT for early io */
1185 #if defined(CONFIG_GAMECUBE)
1187 #elif defined(CONFIG_WII)
1190 #error Invalid platform for USB Gecko based early debugging.
1193 * The virtual address used must match the virtual address
1194 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1196 lis r11, 0xfffe /* top 128K */
1197 ori r8, r8, 0x002a /* uncached, guarded, rw */
1198 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1199 mtspr SPRN_DBAT1L, r8
1200 mtspr SPRN_DBAT1U, r11