1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
20 #include <linux/pgtable.h>
24 #include <asm/cputable.h>
25 #include <asm/cache.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/ptrace.h>
31 #include <asm/kvm_book3s_asm.h>
32 #include <asm/export.h>
33 #include <asm/feature-fixups.h>
34 #include <asm/interrupt.h>
/*
 * LOAD_BAT(n, reg, RA, RB): program the IBATn/DBATn register pair from a
 * table pointed to by `reg`; entry n occupies 16 bytes laid out as
 * IBATnU, IBATnL, DBATnU, DBATnL.  The upper BAT registers are written
 * first to invalidate the mapping before the low words are loaded (see
 * the clear_bats() comment on the 604e valid-bit quirk).
 * NOTE(review): this excerpt appears to be missing the instruction that
 * zeroes RA before the first two mtspr's (original line 40 is absent);
 * confirm against the full source.
 */
38 #define LOAD_BAT(n, reg, RA, RB) \
39 /* see the comment for clear_bats() -- Cort */ \
41 mtspr SPRN_IBAT##n##U,RA; \
42 mtspr SPRN_DBAT##n##U,RA; \
43 lwz RA,(n*16)+0(reg); \
44 lwz RB,(n*16)+4(reg); \
45 mtspr SPRN_IBAT##n##U,RA; \
46 mtspr SPRN_IBAT##n##L,RB; \
47 lwz RA,(n*16)+8(reg); \
48 lwz RB,(n*16)+12(reg); \
49 mtspr SPRN_DBAT##n##U,RA; \
50 mtspr SPRN_DBAT##n##L,RB
/*
 * Kernel entry (_start) region: stabs debug info, legacy XCOFF/COFF-style
 * entry nops, and the Open Firmware boot trampoline.
 * NOTE(review): this region is an incomplete excerpt -- the embedded
 * original line numbers are non-contiguous, so instructions between the
 * fragments below are missing.  Code kept byte-identical; comments only.
 */
53 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
54 .stabs "head_book3s_32.S",N_SO,0,0,0f
59 * _start is defined this way because the XCOFF loader in the OpenFirmware
60 * on the powermac expects the entry point to be a procedure descriptor.
64 * These are here for legacy reasons, the kernel used to
65 * need to look like a coff function entry for the pmac
66 * but we're always started by some kind of bootloader now.
69 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
70 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
74 * Enter here with the kernel text, data and bss loaded starting at
75 * 0, running with virtual == physical mapping.
76 * r5 points to the prom entry point (the client interface handler
77 * address). Address translation is turned on, with the prom
78 * managing the hash table. Interrupts are disabled. The stack
79 * pointer (r1) points to just below the end of the half-meg region
80 * from 0x380000 - 0x400000, which is mapped in already.
82 * If we are booted from MacOS via BootX, we enter with the kernel
83 * image loaded somewhere, and the following values in registers:
84 * r3: 'BooX' (0x426f6f58)
85 * r4: virtual address of boot_infos_t
89 * This is jumped to on prep systems right after the kernel is relocated
90 * to its proper place in memory by the boot loader. The expected layout
92 * r3: ptr to residual data
93 * r4: initrd_start or if no initrd then 0
94 * r5: initrd_end - unused if r4 is 0
95 * r6: Start of command line string
96 * r7: End of command line string
98 * This just gets a minimal mmu environment setup so we can call
99 * start_here() to do the real work.
106 * We have to do any OF calls before we map ourselves to KERNELBASE,
107 * because OF may have I/O devices mapped into that area
108 * (particularly on CHRP).
113 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
114 /* find out where we are now */
/* PC-relative trick: branch-and-link to 0f, then add (_stext - 0b) to
 * the link-register value to get the runtime base address in r8. */
116 0: mflr r8 /* r8 = runtime addr here */
117 addis r8,r8,(_stext - 0b)@ha
118 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
120 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
122 /* We never return. We also hit that trap if trying to boot
123 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
127 * Check for BootX signature when supporting PowerMac and branch to
128 * appropriate trampoline if it's present
130 #ifdef CONFIG_PPC_PMAC
137 #endif /* CONFIG_PPC_PMAC */
/*
 * Early boot path (incomplete excerpt; interior lines missing): save the
 * device-tree pointer, run early_init(), switch the MMU off, program the
 * initial/debug BATs, optionally relocate the image to PHYSICAL_START,
 * then rfi into start_here with translation enabled.
 */
139 1: mr r31,r3 /* save device tree ptr */
143 * early_init() does the early machine identification and does
144 * the necessary low-level setup and clears the BSS
145 * -- Cort <cort@fsmlabs.com>
149 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
150 * the physical address we are running at, returned by early_init()
158 bl load_segment_registers
161 #if defined(CONFIG_BOOTX_TEXT)
164 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
167 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
168 bl setup_usbgecko_bat
172 * Call setup_cpu for CPU 0 and initialize 6xx Idle
176 bl call_setup_cpu /* Call setup_cpu for this CPU */
182 * We need to run with _start at physical address 0.
183 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
184 * the exception vectors at 0 (and therefore this copy
185 * overwrites OF's exception vectors with our own).
186 * The MMU is off at this point.
190 addis r4,r3,KERNELBASE@h /* current address of _start */
191 lis r5,PHYSICAL_START@h
192 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
195 * we now have the 1st 16M of ram mapped with the bats.
196 * prep needs the mmu to be turned on here, but pmac already has it on.
197 * this shouldn't bother the pmac since it just gets turned on again
198 * as we jump to our code at KERNELBASE. -- Cort
199 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
200 * off, and in other cases, we now turn it off before changing BATs above.
/* Build an MSR with translation (IR/DR) and recoverable-interrupt (RI)
 * set, point SRR0 at start_here, and rfi to switch the MMU on. */
204 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
207 ori r0,r0,start_here@l
209 rfi /* enables MMU */
/*
 * __secondary_hold: parking loop for secondary CPUs at fixed address 0xc0
 * (reachable by the prep bootloader).  Each secondary acknowledges via
 * __secondary_hold_acknowledge, then spins until released; its CPU number
 * ends up in r24.  (Excerpt: the spin-loop body itself is missing here.)
 */
212 * We need __secondary_hold as a place to hold the other cpus on
213 * an SMP machine, even when we are running a UP kernel.
215 . = 0xc0 /* for prep bootloader */
216 li r3,1 /* MTX only has 1 cpu */
217 .globl __secondary_hold
219 /* tell the master we're here */
220 stw r3,__secondary_hold_acknowledge@l(0)
223 /* wait until we're told to start */
226 /* our cpu # was at addr 0 - go */
227 mr r24,r3 /* cpu # */
231 #endif /* CONFIG_SMP */
233 .globl __secondary_hold_spinloop
234 __secondary_hold_spinloop:
236 .globl __secondary_hold_acknowledge
237 __secondary_hold_acknowledge:
241 /* core99 pmac starts the secondary here by changing the vector, and
242 putting it back to what it was (unknown_async_exception) when done. */
243 EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
/*
 * Machine-check (0x200) and data-storage (DSI) exception vectors.
 * Incomplete excerpt: several prolog instructions are missing between
 * the fragments below.  Code kept byte-identical; comments only.
 */
247 * On CHRP, this is complicated by the fact that we could get a
248 * machine check inside RTAS, and we have no guarantee that certain
249 * critical registers will have the values we expect. The set of
250 * registers that might have bad values includes all the GPRs
251 * and all the BATs. We indicate that we are in RTAS by putting
252 * a non-zero value, the address of the exception frame to use,
253 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
254 * and uses its value if it is non-zero.
255 * (Other exception handlers assume that r1 is a valid kernel stack
256 * pointer when we take an exception from supervisor mode.)
259 START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
261 #ifdef CONFIG_PPC_CHRP
/* Save r1 to a scratch SPRG so we can test thread.rtas_sp via the
 * thread pointer kept in SPRN_SPRG_THREAD, then restore it. */
262 mtspr SPRN_SPRG_SCRATCH2,r1
263 mfspr r1, SPRN_SPRG_THREAD
267 mfspr r1, SPRN_SPRG_SCRATCH2
268 #endif /* CONFIG_PPC_CHRP */
270 7: EXCEPTION_PROLOG_2 0x200 MachineCheck
271 #ifdef CONFIG_PPC_CHRP
275 1: prepare_transfer_to_handler
276 bl machine_check_exception
279 /* Data access exception. */
280 START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
281 #ifdef CONFIG_PPC_BOOK3S_604
/* On 604-class (hash MMU) parts, try the fast hash_page path first;
 * only faults flagged in DSISR as bad/DABR-match go to the slow path. */
282 BEGIN_MMU_FTR_SECTION
283 mtspr SPRN_SPRG_SCRATCH2,r10
284 mfspr r10, SPRN_SPRG_THREAD
286 mfspr r10, SPRN_DSISR
288 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
289 mfspr r10, SPRN_SPRG_THREAD
291 .Lhash_page_dsi_cont:
294 mfspr r10, SPRN_SPRG_SCRATCH2
297 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
299 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
301 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
302 prepare_transfer_to_handler
304 andis. r0, r5, DSISR_DABRMATCH@h
/*
 * Vectors 0x400-0xf20: instruction-storage (ISI), external interrupt,
 * alignment, program check, FP unavailable, decrementer, syscall,
 * single-step, and the perfmon/altivec traps that get remapped.
 * Incomplete excerpt; code kept byte-identical, comments only.
 */
313 /* Instruction access exception. */
314 START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
315 mtspr SPRN_SPRG_SCRATCH0,r10
316 mtspr SPRN_SPRG_SCRATCH1,r11
317 mfspr r10, SPRN_SPRG_THREAD
320 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
323 #ifdef CONFIG_PPC_BOOK3S_604
/* Hash-MMU parts: a "no PTE found" ISI first tries hash_page. */
324 BEGIN_MMU_FTR_SECTION
325 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
327 .Lhash_page_isi_cont:
328 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
329 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
331 andi. r11, r11, MSR_PR
334 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
335 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
338 prepare_transfer_to_handler
342 /* External interrupt */
343 EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
345 /* Alignment exception */
346 START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
347 EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
348 prepare_transfer_to_handler
349 bl alignment_exception
353 /* Program check exception */
354 START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
355 EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
356 prepare_transfer_to_handler
357 bl program_check_exception
361 /* Floating-point unavailable */
362 START_EXCEPTION(0x800, FPUnavailable)
363 #ifdef CONFIG_PPC_FPU
366 * Certain Freescale cores don't have a FPU and treat fp instructions
367 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
370 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
371 EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
373 bl load_up_fpu /* if from user, just load it up */
374 b fast_exception_return
375 1: prepare_transfer_to_handler
376 bl kernel_fp_unavailable_exception
383 EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
385 EXCEPTION(0xa00, Trap_0a, unknown_exception)
386 EXCEPTION(0xb00, Trap_0b, unknown_exception)
389 START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
390 SYSCALL_ENTRY INTERRUPT_SYSCALL
392 EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
393 EXCEPTION(0xe00, Trap_0e, unknown_exception)
396 * The Altivec unavailable trap is at 0x0f20. Foo.
397 * We effectively remap it to 0x3000.
398 * We include an altivec unavailable exception vector even if
399 * not configured for Altivec, so that you can't panic a
400 * non-altivec kernel running on a machine with altivec just
401 * by executing an altivec instruction.
403 START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
406 START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
/*
 * 603/603e software ITLB reload.  The 603 has no hardware table walk;
 * on a TLB miss it traps here with a shadow register set (r0-r3) and
 * the faulting address in SPRN_IMISS.  This handler walks the Linux
 * two-level page table, converts the Linux PTE to a PPC hardware PTE,
 * and loads it; on failure it synthesizes an ISI via
 * InstructionAddressInvalid.  (Excerpt: some lines missing.)
 */
411 * Handle TLB miss for instruction on 603/603e.
412 * Note: we get an alternate set of r0 - r3 to use automatically.
414 . = INTERRUPT_INST_TLB_MISS_603
418 * r1: linux style pte ( later becomes ppc hardware pte )
419 * r2: ptr to linux-style pte
422 /* Get PTE (linux-style) and check access */
424 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
425 lis r1, TASK_SIZE@h /* check if kernel address */
/* r1 = required permission bits; a user access must have all of these. */
429 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
430 rlwinm r2, r2, 28, 0xfffff000
431 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
433 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
434 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
435 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
437 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
438 lwz r2,0(r2) /* get pmd entry */
439 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
440 beq- InstructionAddressInvalid /* return if no mapping */
441 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
442 lwz r0,0(r2) /* get linux-style pte */
/* andc. clears the bits that ARE present; non-zero result means some
 * required permission bit is missing -> fault. */
443 andc. r1,r1,r0 /* check access & ~permission */
444 bne- InstructionAddressInvalid /* return if access not permitted */
445 /* Convert linux-style PTE to low word of PPC-style PTE */
446 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
447 ori r1, r1, 0xe06 /* clear out reserved bits */
448 andc r1, r0, r1 /* PP = user? 1 : 0 */
450 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
451 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
454 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
/* Miss could not be satisfied: fabricate SRR1/DSISR/DAR as a real ISI
 * would, leave the shadow GPRs (clear MSR_TGPR) and take the ISI path. */
457 InstructionAddressInvalid:
459 rlwinm r1,r3,9,6,6 /* Get load/store bit */
462 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
463 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
466 mfspr r1,SPRN_IMISS /* Get failing address */
467 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
468 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
470 mtspr SPRN_DAR,r1 /* Set fault address */
471 mfmsr r0 /* Restore "normal" registers */
472 xoris r0,r0,MSR_TGPR>>16
473 mtcrf 0x80,r3 /* Restore CR0 */
/*
 * 603/603e software DTLB reload for loads.  Same structure as the ITLB
 * handler: walk the Linux page table for the address in SPRN_DMISS,
 * convert the PTE, and reload; unmappable/forbidden accesses fall
 * through to DataAddressInvalid which synthesizes a DSI.
 * Includes an optional software-LRU way-selection section for parts
 * with MMU_FTR_NEED_DTLB_SW_LRU.  (Excerpt: some lines missing.)
 */
478 * Handle TLB miss for DATA Load operation on 603/603e
480 . = INTERRUPT_DATA_LOAD_TLB_MISS_603
484 * r1: linux style pte ( later becomes ppc hardware pte )
485 * r2: ptr to linux-style pte
488 /* Get PTE (linux-style) and check access */
490 lis r1, TASK_SIZE@h /* check if kernel address */
493 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
494 rlwinm r2, r2, 28, 0xfffff000
496 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
497 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
498 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
499 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
500 lwz r2,0(r2) /* get pmd entry */
501 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
502 beq- DataAddressInvalid /* return if no mapping */
503 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
504 lwz r0,0(r2) /* get linux-style pte */
505 andc. r1,r1,r0 /* check access & ~permission */
506 bne- DataAddressInvalid /* return if access not permitted */
508 * NOTE! We are assuming this is not an SMP system, otherwise
509 * we would need to update the pte atomically with lwarx/stwcx.
511 /* Convert linux-style PTE to low word of PPC-style PTE */
512 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
513 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
514 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
515 ori r1,r1,0xe04 /* clear out reserved bits */
516 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
518 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
519 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
/* Software LRU: track recently-used TLB ways per set in SPRG_603_LRU
 * so the reload picks the less-recently-used way. */
521 BEGIN_MMU_FTR_SECTION
523 mfspr r1,SPRN_SPRG_603_LRU
524 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
528 mtspr SPRN_SPRG_603_LRU,r1
530 rlwimi r2,r0,31-14,14,14
536 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
540 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/* DataAddressInvalid: fabricate a DSI (DSISR/DAR/SRR1), drop out of the
 * shadow register set via MSR_TGPR, and take the DSI path. */
543 rlwinm r1,r3,9,6,6 /* Get load/store bit */
546 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
548 mfspr r1,SPRN_DMISS /* Get failing address */
549 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
550 beq 20f /* Jump if big endian */
552 20: mtspr SPRN_DAR,r1 /* Set fault address */
553 mfmsr r0 /* Restore "normal" registers */
554 xoris r0,r0,MSR_TGPR>>16
555 mtcrf 0x80,r3 /* Restore CR0 */
/*
 * 603/603e software DTLB reload for stores.  As the load-miss handler,
 * but the permission mask additionally requires _PAGE_RW and
 * _PAGE_DIRTY, so clean or read-only pages take the DSI path (which
 * lets the generic fault code mark them dirty/handle COW).
 * (Excerpt: some lines missing.)
 */
560 * Handle TLB miss for DATA Store on 603/603e
562 . = INTERRUPT_DATA_STORE_TLB_MISS_603
566 * r1: linux style pte ( later becomes ppc hardware pte )
567 * r2: ptr to linux-style pte
570 /* Get PTE (linux-style) and check access */
572 lis r1, TASK_SIZE@h /* check if kernel address */
575 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
576 rlwinm r2, r2, 28, 0xfffff000
578 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
579 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
580 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
581 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
582 lwz r2,0(r2) /* get pmd entry */
583 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
584 beq- DataAddressInvalid /* return if no mapping */
585 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
586 lwz r0,0(r2) /* get linux-style pte */
587 andc. r1,r1,r0 /* check access & ~permission */
588 bne- DataAddressInvalid /* return if access not permitted */
590 * NOTE! We are assuming this is not an SMP system, otherwise
591 * we would need to update the pte atomically with lwarx/stwcx.
593 /* Convert linux-style PTE to low word of PPC-style PTE */
594 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
595 li r1,0xe06 /* clear out reserved bits & PP msb */
596 andc r1,r0,r1 /* PP = user? 1: 0 */
598 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
599 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
601 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
/* Software LRU way selection, as in the data-load miss handler. */
603 BEGIN_MMU_FTR_SECTION
605 mfspr r1,SPRN_SPRG_603_LRU
606 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
610 mtspr SPRN_SPRG_603_LRU,r1
612 rlwimi r2,r0,31-14,14,14
618 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
622 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/*
 * Remaining vector table (0x1300-0x2f00).  Traps with no dedicated
 * handler fall back to unknown_exception; the #define fallbacks below
 * route the AltiVec-assist and thermal (TAU) vectors to the generic
 * handlers when those features are not configured.
 */
624 #ifndef CONFIG_ALTIVEC
625 #define altivec_assist_exception unknown_exception
628 #ifndef CONFIG_TAU_INT
629 #define TAUException unknown_async_exception
632 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
633 EXCEPTION(0x1400, SMI, SMIException)
634 EXCEPTION(0x1500, Trap_15, unknown_exception)
635 EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
636 EXCEPTION(0x1700, Trap_17, TAUException)
637 EXCEPTION(0x1800, Trap_18, unknown_exception)
638 EXCEPTION(0x1900, Trap_19, unknown_exception)
639 EXCEPTION(0x1a00, Trap_1a, unknown_exception)
640 EXCEPTION(0x1b00, Trap_1b, unknown_exception)
641 EXCEPTION(0x1c00, Trap_1c, unknown_exception)
642 EXCEPTION(0x1d00, Trap_1d, unknown_exception)
643 EXCEPTION(0x1e00, Trap_1e, unknown_exception)
644 EXCEPTION(0x1f00, Trap_1f, unknown_exception)
645 EXCEPTION(0x2000, RunMode, RunModeException)
646 EXCEPTION(0x2100, Trap_21, unknown_exception)
647 EXCEPTION(0x2200, Trap_22, unknown_exception)
648 EXCEPTION(0x2300, Trap_23, unknown_exception)
649 EXCEPTION(0x2400, Trap_24, unknown_exception)
650 EXCEPTION(0x2500, Trap_25, unknown_exception)
651 EXCEPTION(0x2600, Trap_26, unknown_exception)
652 EXCEPTION(0x2700, Trap_27, unknown_exception)
653 EXCEPTION(0x2800, Trap_28, unknown_exception)
654 EXCEPTION(0x2900, Trap_29, unknown_exception)
655 EXCEPTION(0x2a00, Trap_2a, unknown_exception)
656 EXCEPTION(0x2b00, Trap_2b, unknown_exception)
657 EXCEPTION(0x2c00, Trap_2c, unknown_exception)
658 EXCEPTION(0x2d00, Trap_2d, unknown_exception)
659 EXCEPTION(0x2e00, Trap_2e, unknown_exception)
660 EXCEPTION(0x2f00, Trap_2f, unknown_exception)
/*
 * 604 (hash MMU) support: save/restore macros for the volatile GPRs,
 * LR and CTR into thread_struct slots (THR*, THLR, THCTR) around calls
 * to hash_page from the DSI/ISI fast paths, plus the return glue that
 * resumes the interrupted fault handling.  (Excerpt: lines missing.)
 */
665 #ifdef CONFIG_PPC_BOOK3S_604
666 .macro save_regs_thread thread
667 stw r0, THR0(\thread)
668 stw r3, THR3(\thread)
669 stw r4, THR4(\thread)
670 stw r5, THR5(\thread)
671 stw r6, THR6(\thread)
672 stw r8, THR8(\thread)
673 stw r9, THR9(\thread)
675 stw r0, THLR(\thread)
677 stw r0, THCTR(\thread)
680 .macro restore_regs_thread thread
681 lwz r0, THLR(\thread)
683 lwz r0, THCTR(\thread)
685 lwz r0, THR0(\thread)
686 lwz r3, THR3(\thread)
687 lwz r4, THR4(\thread)
688 lwz r5, THR5(\thread)
689 lwz r6, THR6(\thread)
690 lwz r8, THR8(\thread)
691 lwz r9, THR9(\thread)
/* DSI path: derive _PAGE_RW from the DSISR store bit, call hash_page,
 * then restore and continue the interrupted DSI handling. */
700 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
702 mfspr r10, SPRN_SPRG_THREAD
703 restore_regs_thread r10
704 b .Lhash_page_dsi_cont
708 mfspr r10, SPRN_SPRG_THREAD
714 mfspr r10, SPRN_SPRG_THREAD
715 restore_regs_thread r10
717 b .Lhash_page_isi_cont
719 .globl fast_hash_page_return
720 fast_hash_page_return:
721 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
722 mfspr r10, SPRN_SPRG_THREAD
723 restore_regs_thread r10
729 mfspr r10, SPRN_SPRG_SCRATCH2
734 mfspr r11, SPRN_SPRG_SCRATCH1
735 mfspr r10, SPRN_SPRG_SCRATCH0
737 #endif /* CONFIG_PPC_BOOK3S_604 */
/*
 * Out-of-line bodies for the remapped traps: vmap-stack overflow,
 * AltiVec unavailable (remapped from 0xf20), and performance monitor
 * (remapped from 0xf00).  AltiVec faults from user space just load the
 * vector state; kernel-mode use is an error.  (Excerpt: lines missing.)
 */
739 #ifdef CONFIG_VMAP_STACK
740 vmap_stack_overflow_exception
745 EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
746 #ifdef CONFIG_ALTIVEC
748 bl load_up_altivec /* if from user, just load it up */
749 b fast_exception_return
750 #endif /* CONFIG_ALTIVEC */
751 1: prepare_transfer_to_handler
752 bl altivec_unavailable_exception
757 EXCEPTION_PROLOG 0xf00 PerformanceMonitor
758 prepare_transfer_to_handler
759 bl performance_monitor_exception
/*
 * Kernel relocation: copy the image down to PHYSICAL_START.  The first
 * 0x4000 bytes (containing this code) are copied, then execution jumps
 * into the copy (label 4f) to copy the remainder up to _end.
 * copy_and_flush copies a cache line at a time and does dcbst/icbi so
 * the instruction cache sees the new text.  (Excerpt: lines missing.)
 */
765 * This code is jumped to from the startup code to copy
766 * the kernel image to physical address PHYSICAL_START.
769 lis r3,PHYSICAL_START@h /* Destination base address */
770 li r6,0 /* Destination offset */
771 li r5,0x4000 /* # bytes of memory to copy */
772 bl copy_and_flush /* copy the first 0x4000 bytes */
773 addi r0,r3,4f@l /* jump to the address of 4f */
774 mtctr r0 /* in copy and do the rest. */
775 bctr /* jump to the copy */
776 4: lis r5,_end-KERNELBASE@h
777 ori r5,r5,_end-KERNELBASE@l
778 bl copy_and_flush /* copy the rest */
782 * Copy routine used to copy the kernel to start at physical address 0
783 * and flush and invalidate the caches as needed.
784 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
785 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
787 _ENTRY(copy_and_flush)
790 4: li r0,L1_CACHE_BYTES/4 /* words per cache line */
792 3: addi r6,r6,4 /* copy a cache line */
796 dcbst r6,r3 /* write it to memory */
798 icbi r6,r3 /* flush the icache line */
801 sync /* additional sync needed on g4 */
/*
 * Secondary CPU bring-up: platform-specific entry stubs (mpc86xx,
 * PowerMac "powersurge") funnel into __secondary_start, which restores
 * CPU setup, finds its stack/current from secondary_current, loads the
 * MMU, points SPRG_THREAD at its thread_struct, and rfi's into
 * start_secondary.  (Excerpt: lines missing between fragments.)
 */
808 .globl __secondary_start_mpc86xx
809 __secondary_start_mpc86xx:
811 stw r3, __secondary_hold_acknowledge@l(0)
812 mr r24, r3 /* cpu # */
815 .globl __secondary_start_pmac_0
816 __secondary_start_pmac_0:
817 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
826 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
827 set to map the 0xf0000000 - 0xffffffff region */
829 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
833 .globl __secondary_start
835 /* Copy some CPU settings from CPU 0 */
836 bl __restore_cpu_setup
840 bl call_setup_cpu /* Call setup_cpu for this CPU */
844 /* get current's stack and current */
845 lis r2,secondary_current@ha
847 lwz r2,secondary_current@l(r2)
849 lwz r1,TASK_STACK(r1)
852 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
857 /* load up the MMU */
858 bl load_segment_registers
861 /* ptr to phys current thread */
863 addi r4,r4,THREAD /* phys address of our thread_struct */
864 mtspr SPRN_SPRG_THREAD,r4
/* Non-hash (603-class) parts keep the kernel pgdir in an SPRG for the
 * software TLB miss handlers. */
865 BEGIN_MMU_FTR_SECTION
866 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
867 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
868 rlwinm r4, r4, 4, 0xffff01ff
870 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
872 /* enable MMU and jump to start_secondary */
874 lis r3,start_secondary@h
875 ori r3,r3,start_secondary@l
879 #endif /* CONFIG_SMP */
/*
 * MMU loading helpers: flush the TLB, program SDR1 (hash table base and
 * size) on hash-MMU parts, load the BAT registers set up by MMU_init,
 * and load_segment_registers, which initializes all 16 segment
 * registers for context 0 (user segments get SR_NX under KUEP; kernel
 * segments get Kp=1, Ks=0, Nx=0; the VSID skew is 0x111 per segment).
 * (Excerpt: lines missing between fragments.)
 */
881 #ifdef CONFIG_KVM_BOOK3S_HANDLER
882 #include "../kvm/book3s_rmhandlers.S"
886 * Load stuff into the MMU. Intended to be called with
890 sync /* Force all PTE updates to finish */
892 tlbia /* Clear all TLB entries */
893 sync /* wait for tlbia/tlbie to finish */
894 TLBSYNC /* ... on all CPUs */
895 /* Load the SDR1 register (hash table base & size) */
896 lis r6, early_hash - PAGE_OFFSET@h
897 ori r6, r6, 3 /* 256kB table */
902 sync /* Force all PTE updates to finish */
904 tlbia /* Clear all TLB entries */
905 sync /* wait for tlbia/tlbie to finish */
906 TLBSYNC /* ... on all CPUs */
907 BEGIN_MMU_FTR_SECTION
908 /* Load the SDR1 register (hash table base & size) */
913 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
915 /* Load the BAT registers with the values set up by MMU_init. */
923 BEGIN_MMU_FTR_SECTION
928 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
931 _GLOBAL(load_segment_registers)
932 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
933 mtctr r0 /* for context 0 */
934 #ifdef CONFIG_PPC_KUEP
935 lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
937 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
941 addi r3, r3, 0x111 /* increment VSID */
942 addis r4, r4, 0x1000 /* address of next segment */
944 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
945 mtctr r0 /* for context 0 */
946 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
947 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
948 oris r3, r3, SR_KP@h /* Kp = 1 */
950 addi r3, r3, 0x111 /* increment VSID */
951 addis r4, r4, 0x1000 /* address of next segment */
/*
 * start_here: first code run with the MMU on.  Establishes init_task as
 * current, points SPRG_THREAD at its thread_struct, sets up the initial
 * kernel stack in init_thread_union, calls the platform/MMU init code,
 * drops translation to reload SDR1/segments, optionally publishes
 * pgdir pointers for the Abatron BDI debugger, then rfi's into
 * start_kernel.  (Excerpt: lines missing between fragments.)
 */
956 * This is where the main kernel code starts.
961 ori r2,r2,init_task@l
962 /* Set up for using our exception vectors */
963 /* ptr to phys current thread */
965 addi r4,r4,THREAD /* init task's THREAD */
966 mtspr SPRN_SPRG_THREAD,r4
967 BEGIN_MMU_FTR_SECTION
968 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
969 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
970 rlwinm r4, r4, 4, 0xffff01ff
972 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
975 lis r1,init_thread_union@ha
976 addi r1,r1,init_thread_union@l
978 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
980 * Do early platform-specific initialization,
981 * and set up the MMU.
994 * Go back to running unmapped so we can load up new values
995 * for SDR1 (hash table pointer) and the segment registers
996 * and change to using our exception vectors.
1001 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1007 /* Load up the kernel context */
1010 #ifdef CONFIG_BDI_SWITCH
1011 /* Add helper information for the Abatron bdiGDB debugger.
1012 * We do this here because we know the mmu is disabled, and
1013 * will be enabled for real in just a few instructions.
1015 lis r5, abatron_pteptrs@h
1016 ori r5, r5, abatron_pteptrs@l
1017 stw r5, 0xf0(0) /* This must match your Abatron config */
1018 lis r6, swapper_pg_dir@h
1019 ori r6, r6, swapper_pg_dir@l
1022 #endif /* CONFIG_BDI_SWITCH */
1024 /* Now turn on the MMU for real! */
1026 lis r3,start_kernel@h
1027 ori r3,r3,start_kernel@l
/*
 * BAT management helpers (excerpt; lines missing between fragments):
 *  - clear_bats: zero all (I/D)BAT pairs, including the high BATs
 *    (4-7) on 745x-class parts, working around the 604e requirement
 *    that the valid bit be cleared before changing BAT values.
 *  - load_bats: reload BAT0-3 (and 4-7 when MMU_FTR_USE_HIGH_BATS) from
 *    a table via the LOAD_BAT macro, with translation and RI disabled.
 *  - initial_bats: map the first 256M of RAM at PAGE_OFFSET with BAT0
 *    (M=1 on SMP for coherence, plain R/W otherwise).
 */
1033 * An undocumented "feature" of 604e requires that the v bit
1034 * be cleared before changing BAT values.
1036 * Also, newer IBM firmware does not clear bat3 and 4 so
1037 * this makes sure it's done.
1043 mtspr SPRN_DBAT0U,r10
1044 mtspr SPRN_DBAT0L,r10
1045 mtspr SPRN_DBAT1U,r10
1046 mtspr SPRN_DBAT1L,r10
1047 mtspr SPRN_DBAT2U,r10
1048 mtspr SPRN_DBAT2L,r10
1049 mtspr SPRN_DBAT3U,r10
1050 mtspr SPRN_DBAT3L,r10
1051 mtspr SPRN_IBAT0U,r10
1052 mtspr SPRN_IBAT0L,r10
1053 mtspr SPRN_IBAT1U,r10
1054 mtspr SPRN_IBAT1L,r10
1055 mtspr SPRN_IBAT2U,r10
1056 mtspr SPRN_IBAT2L,r10
1057 mtspr SPRN_IBAT3U,r10
1058 mtspr SPRN_IBAT3L,r10
1059 BEGIN_MMU_FTR_SECTION
1060 /* Here's a tweak: at this point, CPU setup have
1061 * not been called yet, so HIGH_BAT_EN may not be
1062 * set in HID0 for the 745x processors. However, it
1063 * seems that doesn't affect our ability to actually
1064 * write to these SPRs.
1066 mtspr SPRN_DBAT4U,r10
1067 mtspr SPRN_DBAT4L,r10
1068 mtspr SPRN_DBAT5U,r10
1069 mtspr SPRN_DBAT5L,r10
1070 mtspr SPRN_DBAT6U,r10
1071 mtspr SPRN_DBAT6L,r10
1072 mtspr SPRN_DBAT7U,r10
1073 mtspr SPRN_DBAT7L,r10
1074 mtspr SPRN_IBAT4U,r10
1075 mtspr SPRN_IBAT4L,r10
1076 mtspr SPRN_IBAT5U,r10
1077 mtspr SPRN_IBAT5L,r10
1078 mtspr SPRN_IBAT6U,r10
1079 mtspr SPRN_IBAT6L,r10
1080 mtspr SPRN_IBAT7U,r10
1081 mtspr SPRN_IBAT7L,r10
1082 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* load_bats: run with IR/DR (and eventually RI) cleared while the BAT
 * registers are rewritten from the table in r3. */
1091 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1092 rlwinm r0, r6, 0, ~MSR_RI
1093 rlwinm r0, r0, 0, ~MSR_EE
1104 LOAD_BAT(0, r3, r4, r5)
1105 LOAD_BAT(1, r3, r4, r5)
1106 LOAD_BAT(2, r3, r4, r5)
1107 LOAD_BAT(3, r3, r4, r5)
1108 BEGIN_MMU_FTR_SECTION
1109 LOAD_BAT(4, r3, r4, r5)
1110 LOAD_BAT(5, r3, r4, r5)
1111 LOAD_BAT(6, r3, r4, r5)
1112 LOAD_BAT(7, r3, r4, r5)
1113 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1114 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
/* flush_tlbs: step backwards 4kB at a time invalidating entries. */
1122 1: addic. r10, r10, -0x1000
/* mmu_off: compute the physical return address and verify the MMU is
 * actually on before turning it off. */
1129 addi r4, r3, __after_mmu_off - _start
1131 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1141 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
1143 lis r11,PAGE_OFFSET@h
1146 ori r8,r8,0x12 /* R/W access, M=1 */
1148 ori r8,r8,2 /* R/W access */
1149 #endif /* CONFIG_SMP */
1150 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1152 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
1153 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1154 mtspr SPRN_IBAT0L,r8
1155 mtspr SPRN_IBAT0U,r11
/*
 * Early-debug BAT setup (excerpt; lines missing between fragments):
 *  - BOOTX_TEXT: map the BootX-provided display via DBAT3 using the
 *    disp_BAT values prepared in prom.c.
 *  - EARLY_DEBUG_CPM: map a 1M uncached region via DBAT1 for the CPM
 *    console.
 *  - EARLY_DEBUG_USBGECKO: map the GameCube/Wii USB Gecko EXI window
 *    (top 128K, uncached+guarded, supervisor-only) via DBAT1; must
 *    match the FIX_EARLY_DEBUG_BASE fixmap virtual address.
 */
1159 #ifdef CONFIG_BOOTX_TEXT
1162 * setup the display bat prepared for us in prom.c
1167 addis r8,r3,disp_BAT@ha
1168 addi r8,r8,disp_BAT@l
1173 mtspr SPRN_DBAT3L,r8
1174 mtspr SPRN_DBAT3U,r11
1176 #endif /* CONFIG_BOOTX_TEXT */
1178 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1182 mtspr SPRN_DBAT1L, r8
1185 ori r11, r11, (BL_1M << 2) | 2
1186 mtspr SPRN_DBAT1U, r11
1191 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1193 /* prepare a BAT for early io */
1194 #if defined(CONFIG_GAMECUBE)
1196 #elif defined(CONFIG_WII)
1199 #error Invalid platform for USB Gecko based early debugging.
1202 * The virtual address used must match the virtual address
1203 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1205 lis r11, 0xfffe /* top 128K */
1206 ori r8, r8, 0x002a /* uncached, guarded, rw */
1207 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1208 mtspr SPRN_DBAT1L, r8
1209 mtspr SPRN_DBAT1U, r11