/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values... Be aware!
 */
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/probes.h>
	.macro	irq_handler, from_user:req
	ldr_this_cpu r2, irq_stack_ptr, r2, r3

	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
#ifdef CONFIG_VMAP_STACK
	ldr_va	r3, high_memory, cc	@ End of the linear region
	cmpcc	r3, r1			@ Stack pointer was below it?
#endif
	bcc	0f			@ If not, switch to the IRQ stack
	bl	generic_handle_arch_irq

	mov_l	r0, generic_handle_arch_irq
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC

	@ Call the processor-specific abort handler:
	@
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
	.section	.entry.text, "ax", %progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	inv_entry BAD_PREFETCH
ENDPROC(__pabt_invalid)

ENDPROC(__dabt_invalid)

ENDPROC(__irq_invalid)

	inv_entry BAD_UNDEFINSTR
	@ XXX fall through to common_invalid

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"
ENDPROC(__und_invalid)
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
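/*
 * Note: the AAPCS/EABI requires the stack pointer to be 8-byte aligned at
 * public interfaces, so on EABI kernels svc_entry uses SPFIX() to emit an
 * extra "subne sp, sp, #4" whenever the exception frame would leave SP only
 * 4-byte aligned; on pre-EABI builds SPFIX() expands to nothing.
 */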
	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
 THUMB(	add	sp, r1		)	@ get SP in a GPR without
 THUMB(	sub	r1, sp, r1	)	@ using a temp register
 UNWIND(.save	{r0 - pc}	)
	do_overflow_check (SVC_REGS_SIZE + \stack_hole)

#ifdef CONFIG_THUMB2_KERNEL
	tst	r1, #4			@ test stack pointer alignment
	sub	r1, sp, r1		@ restore original R1
	sub	sp, r1			@ restore original SP
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subne	sp, sp, #4	)

 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2

	add	r7, sp, #S_SP		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
 SPFIX(	addne	r2, r2, #4	)
	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	uaccess_entry tsk, r0, r1, r2, \uaccess

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
	irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception

#ifdef CONFIG_PREEMPTION
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif
	@
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, and we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	@
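	@ Example: an ARM-mode undefined instruction at 0x8000 enters this
	@ handler with lr_und = 0x8004, so subtracting 4 recovers the
	@ faulting PC; in Thumb mode lr_und would be 0x8002 and we subtract
	@ 2 instead.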
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	mov	r1, #4				@ PC correction to apply
 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
	mov	r0, sp				@ struct pt_regs *regs
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception

	svc_exit r5				@ return from exception
	mov	r0, sp				@ struct pt_regs *regs
/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However, in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	add	r0, sp, #8			@ struct pt_regs *regs

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE must be too.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr_va	r8, cr_alignment)

	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
	@ Enable the alignment trap while in kernel mode
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

	reload_current r7, r8

	@
	@ Clear FP to mark the first stack frame
	@

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm
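/*
 * kuser_cmpxchg_check: r4 holds the PC of the interrupted user instruction.
 * The vector-page helpers live above TASK_SIZE (at 0xffff0f60 and up), and
 * nothing else up there is executable from user mode, so a single unsigned
 * compare of r4 against TASK_SIZE decides whether the out-of-line fixup
 * needs to run at all.
 */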
	irq_handler from_user=1
	b	ret_to_user_from_irq
	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.

	tst	r5, #PSR_T_BIT			@ Thumb mode?
	mov	r1, #2				@ set insn size to 2 for Thumb
	bne	0f				@ handle as Thumb undef exception
#ifdef CONFIG_FPE_NWFPE
	adr	r9, ret_from_exception
	bl	call_fpe			@ returns via R9 on success
#endif
	mov	r1, #4				@ set insn size to 4 for ARM
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
ENDPROC(ret_from_exception)
	mov	r0, sp				@ struct pt_regs *regs
	restore_user_regs fast = 0, offset = 0
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
	.else
	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
	.endif
#endif
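	@ An ARM LDR immediate offset is limited to 12 bits (0-4095), so when
	@ TSK_STACK_CANARY exceeds IMM12_MASK the offset is split: the ADD
	@ covers the bits above the low 12, and the LDR supplies the rest.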
	mov	r7, r2				@ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	str	r9, [r8]
#endif
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
#else
	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
	@
	@ Do a dummy read from the new stack while running from the old one so
	@ that we can rely on do_translation_fault() to fix up any stale PMD
	@ entries covering the vmalloc region.
	@
	ldr	r2, [ip]
#endif

	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
	@ effectuates the task switch, as that is what causes the observable
	@ values of current and current_thread_info to change. When
	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
	@ current_thread_info) is done explicitly, and the update of SP just
	@ switches us to another stack, with few other side effects. In order
	@ to prevent this distinction from causing any inconsistencies, let's
	@ keep the 'set_current' call as close as we can to the update of SP.
#ifdef CONFIG_VMAP_STACK
	@
	@ We've just detected an overflow. We need to load the address of this
	@ CPU's overflow stack into the stack pointer register. We have only one
	@ scratch register so let's use a sequence of ADDs including one
	@ involving the PC, and decorate them with PC-relative group
	@ relocations. As these are ARM only, switch to ARM mode first.
	@
	@ We enter here with IP clobbered and its value stashed on the mode
	@ stack.
	@
	ldr_this_cpu_armv6 ip, overflow_stack_ptr

	str	sp, [ip, #-4]!			@ Preserve original SP value
	mov	sp, ip				@ Switch to overflow stack
	pop	{ip}				@ Original SP in IP
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	mov	ip, ip				@ mov expected by unwinder
	push	{fp, ip, lr, pc}		@ GCC flavor frame record
#else
	str	ip, [sp, #-8]!			@ store original SP
	push	{fpreg, lr}			@ Clang flavor frame record
#endif
UNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
UNWIND( str	ip, [sp, #12]	)		@ store in the frame record
	ldr	ip, [r0, #12]			@ reload IP
	@ Store the original GPRs to the new stack.
	svc_entry uaccess=0, overflow_check=0

UNWIND( .save	{sp, pc}	)
UNWIND( .save	{fpreg, lr}	)
UNWIND( .setfp	fpreg, sp	)

	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
						@ to the linked list
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	ldr	r1, [fp, #4]			@ reload SP at entry
	add	fp, fp, #12			@ point to pc of frame record
#else
	ldr	r1, [fpreg, #8]			@ reload SP at entry
	add	fpreg, fpreg, #12		@ point to lr of frame record
#endif
	str	r1, [sp, #S_SP]			@ store in pt_regs
	@ Stash the regs for handle_bad_stack
/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
 */
#ifdef CONFIG_ARM_THUMB
	.macro	kuser_pad, sym, size
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
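/*
 * kuser_pad first emits zero bytes until the location counter is 4-byte
 * aligned, then fills the rest of the slot up to \size with 0xe7fddef1, a
 * word from the architecturally undefined (UDF) encoding space, so that a
 * stray jump into the padding traps instead of executing garbage.
 */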
#ifdef CONFIG_KUSER_HELPERS

	.globl	__kuser_helper_start
__kuser_helper_start:
/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */
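/*
 * Rough user-space calling convention, per
 * Documentation/arch/arm/kernel_user_helpers.rst (a sketch for
 * illustration, not a declaration made here):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * Returns zero and sets the C flag if *ptr was atomically changed to
 * *newval, non-zero otherwise.
 */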
__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr
#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}
	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
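	@ After the mov/sub pair, r7 holds the user-space address of label 1b:
	@ the helpers run at a fixed location, so 1b lives at 0xffff0f60 +
	@ (1b - __kuser_cmpxchg64). The subs/rsbscs pair then checks
	@ 1b <= r4 <= 2b, and strcs rewinds the saved pc_usr to 1b only when
	@ the interrupted PC was inside the critical section.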
#warning "NPTL on non MMU needs fixing"

#error "incoherent kernel configuration"
	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0

	kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr
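/*
 * Rough user-space calling convention, per
 * Documentation/arch/arm/kernel_user_helpers.rst (a sketch for
 * illustration):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * Returns zero and sets the C flag if *ptr was atomically changed from
 * oldval to newval, non-zero otherwise.
 */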
kuser_cmpxchg32_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#warning "NPTL on non MMU needs fixing"

	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

	kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
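/*
 * Rough user-space calling convention, per
 * Documentation/arch/arm/kernel_user_helpers.rst (a sketch for
 * illustration):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 * Returns the TLS pointer previously set for the thread.
 */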
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
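	@ Each helper occupies one or more 32-byte slots, so shifting the
	@ total size right by 5 yields the number of slots provided; user
	@ space can read this word, e.g. *(int32_t *)0xffff0ffc, to discover
	@ which helpers the running kernel offers.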
	.globl	__kuser_helper_end
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_bpiall_\name:
	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
#endif
	sub	lr, lr, #\correction

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}		@ save r0, lr

	@ Save spsr_<exception> (parent CPSR)
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
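	@ r0, previously loaded from the CPSR, has its mode field reading
	@ \mode here, so XORing with (\mode ^ SVC_MODE) flips the mode bits
	@ to SVC_MODE, while PSR_ISETSTATE selects the instruction set the
	@ kernel was built for. The result becomes the SPSR that the
	@ "movs pc, lr" below restores into the CPSR.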
	@
	@ the branch table must immediately follow this code
	@
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_loop8_\name:
	sub	lr, lr, #\correction

	@ Save r0, lr_<exception> (parent PC)

	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
ENDPROC(vector_bhb_loop8_\name)
	@ handler addresses follow this label
1:
	.endm

	.section .stubs, "ax", %progbits
	@ These need to remain at the start of the section so that
	@ they are in range of the 'SWI' entries in the vector tables
	@ located 4k down.
.L__vector_swi:
	.word	vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
	.word	vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
	.word	vector_bhb_bpiall_swi
#endif

 ARM(	swi	SYS_ERROR0	)
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
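	@ Each of these 16-entry tables is indexed by the low nibble of the
	@ interrupted mode's SPSR (extracted in vector_stub), so index 0 is
	@ USR, 1 is FIQ, 2 is IRQ and 3 is SVC; exceptions taken from any
	@ other mode land in the *_invalid handlers.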
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQ to act like the NMI on x86
 * systems. This must be the last vector stub, so let's place it in its own
 * subsection.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.section .vectors, "ax", %progbits
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi		)
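	@ The SWI slot is not a plain branch: vector_swi ends up too far away
	@ once the vectors are copied to the vector page, so its address is
	@ loaded via a PC-relative LDR instead. The .reloc directives attach
	@ an explicit relocation to the following instruction so the load
	@ resolves against the .L__vector_swi literal emitted in .stubs,
	@ which stays in range of the copied vectors.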
	W(b)	vector_addrexcptn
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.section .vectors.bhb.loop8, "ax", %progbits
	W(b)	vector_bhb_loop8_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
	W(b)	vector_bhb_loop8_pabt
	W(b)	vector_bhb_loop8_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_loop8_irq
	W(b)	vector_bhb_loop8_fiq
	.section .vectors.bhb.bpiall, "ax", %progbits
	W(b)	vector_bhb_bpiall_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
	W(b)	vector_bhb_bpiall_pabt
	W(b)	vector_bhb_bpiall_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_bpiall_irq
	W(b)	vector_bhb_bpiall_fiq