// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

struct fault_info {
	int	(*fn)(unsigned long addr, unsigned int esr,
		      struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

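/*
 * The low six bits of the ESR (the fault status code, ESR_ELx_FSC) index
 * straight into the 64-entry fault_info[] table below; DBG_ESR_EVT()
 * similarly selects an entry in debug_fault_info[].
 */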
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
	return fault_info + (esr & ESR_ELx_FSC);
}

static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
{
	return debug_fault_info + DBG_ESR_EVT(esr);
}

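/*
 * Decode the ISS field of a data abort. The detailed syndrome fields
 * (access size, sign extension, transfer register) are only valid when
 * the ISV bit is set; otherwise only the raw ISS value is printed.
 */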
static void data_abort_decode(unsigned int esr)
{
	pr_alert("Data abort info:\n");

	if (esr & ESR_ELx_ISV) {
		pr_alert("  Access size = %u byte(s)\n",
			 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
		pr_alert("  SSE = %lu, SRT = %lu\n",
			 (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
			 (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
		pr_alert("  SF = %lu, AR = %lu\n",
			 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
			 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
	} else {
		pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
	}

	pr_alert("  CM = %lu, WnR = %lu\n",
		 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
		 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}

static void mem_abort_decode(unsigned int esr)
{
	pr_alert("Mem abort info:\n");

	pr_alert("  ESR = 0x%08x\n", esr);
	pr_alert("  Exception class = %s, IL = %u bits\n",
		 esr_get_class_string(esr),
		 (esr & ESR_ELx_IL) ? 32 : 16);
	pr_alert("  SET = %lu, FnV = %lu\n",
		 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
		 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
	pr_alert("  EA = %lu, S1PTW = %lu\n",
		 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
		 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);

	if (esr_is_data_abort(esr))
		data_abort_decode(esr);
}

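/*
 * The arm64 VA space is split in two: TTBR0 translates the low (user)
 * addresses and TTBR1 the high (kernel) addresses. Anything in between
 * (possible when the kernel is built for a larger VA size than the
 * hardware implements) maps to neither.
 */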
static inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= VA_START;
}

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	pgd_t pgd;

	if (is_ttbr0_addr(addr)) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
			pr_alert("[%016lx] user address but active_mm is swapper\n",
				 addr);
			return;
		}
	} else if (is_ttbr1_addr(addr)) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
		pr_alert("[%016lx] address between user and kernel address ranges\n",
			 addr);
		return;
	}

	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

	do {
		pud_t *pudp, pud;
		pmd_t *pmdp, pmd;
		pte_t *ptep, pte;

		if (pgd_none(pgd) || pgd_bad(pgd))
			break;
		pudp = pud_offset(pgdp, addr);
		pud = READ_ONCE(*pudp);
		pr_cont(", pud=%016llx", pud_val(pud));
		if (pud_none(pud) || pud_bad(pud))
			break;
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		pr_cont(", pmd=%016llx", pmd_val(pmd));
		if (pmd_none(pmd) || pmd_bad(pmd))
			break;
		ptep = pte_offset_map(pmdp, addr);
		pte = READ_ONCE(*ptep);
		pr_cont(", pte=%016llx", pte_val(pte));
		pte_unmap(ptep);
	} while (0);

	pr_cont("\n");
}

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval, pteval;
	pte_t pte = READ_ONCE(*ptep);

	if (pte_same(pte, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state. The PTE_RDONLY bit must
	 * be set to the most permissive (lowest value) of *ptep and entry
	 * (calculated as: a & b == ~(~a | ~b)).
	 */
	pte_val(entry) ^= PTE_RDONLY;
	pteval = pte_val(pte);
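	/*
	 * Note: inverting PTE_RDONLY before the OR below and re-inverting it
	 * afterwards turns the OR into an AND for that one bit
	 * (~(~a | ~b) == a & b), so PTE_RDONLY survives only if both the old
	 * and the new value had it set; every other flag is simply OR-ed in.
	 * Either way the result is never less permissive than before.
	 */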
	do {
		old_pteval = pteval;
		pteval ^= PTE_RDONLY;
		pteval |= pte_val(entry);
		pteval ^= PTE_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);

	flush_tlb_fix_spurious_fault(vma, address);
	return 1;
}

static bool is_el1_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
					   struct pt_regs *regs)
{
	unsigned int ec       = ESR_ELx_EC(esr);
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
		return false;

	if (fsc_type == ESR_ELx_FSC_PERM)
		return true;

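	/*
	 * With software PAN, TTBR0 points at a reserved, empty table while
	 * running in the kernel, so an unintended EL1 access to a user
	 * address shows up as a translation fault rather than a permission
	 * fault; treat it as a permission fault when PSTATE.PAN was set.
	 */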
	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
		return fsc_type == ESR_ELx_FSC_FAULT &&
			(regs->pstate & PSR_PAN_BIT);

	return false;
}

static void die_kernel_fault(const char *msg, unsigned long addr,
			     unsigned int esr, struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
		 addr);

	mem_abort_decode(esr);

	show_pte(addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void __do_kernel_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	const char *msg;

	/*
	 * Are we prepared to handle this kernel fault?
	 * We are almost certainly not prepared to handle instruction faults.
	 */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
		return;

	if (is_el1_permission_fault(addr, esr, regs)) {
		if (esr & ESR_ELx_WNR)
			msg = "write to read-only memory";
		else
			msg = "read from unreadable memory";
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		msg = "paging request";
	}

	die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned int esr)
{
	current->thread.fault_address = address;

	/*
	 * If the faulting address is in the kernel, we must sanitize the ESR.
	 * From userspace's point of view, kernel-only mappings don't exist
	 * at all, so we report them as level 0 translation faults.
	 * (This is not quite the way that "no mapping there at all" behaves:
	 * an alignment fault not caused by the memory type would take
	 * precedence over translation fault for a real access to empty
	 * space. Unfortunately we can't easily distinguish "alignment fault
	 * not caused by memory type" from "alignment fault caused by memory
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (!is_ttbr0_addr(current->thread.fault_address)) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
			 * These bits provide only information about the
			 * faulting instruction, which userspace knows already.
			 * We explicitly clear bits which are architecturally
			 * RES0 in case they are given meanings in future.
			 * We always report the ESR as if the fault was taken
			 * to EL1 and so ISV and the bits in ISS[23:14] are
			 * clear. (In fact it always will be a fault to EL1.)
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
				ESR_ELx_CM | ESR_ELx_WNR;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		case ESR_ELx_EC_IABT_LOW:
			/*
			 * Claim a level 0 translation fault.
			 * All other bits are architecturally RES0 for faults
			 * reported with that DFSC value, so we clear them.
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		default:
			/*
			 * This should never happen (entry.S only brings us
			 * into this code for insn and data aborts from a lower
			 * exception level). Fail safe by not providing an ESR
			 * context record at all.
			 */
			WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
			esr = 0;
			break;
		}
	}

	current->thread.fault_code = esr;
}

static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs)) {
		const struct fault_info *inf = esr_to_fault_info(esr);

		set_thread_esr(addr, esr);
		arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
				      inf->name);
	} else {
		__do_kernel_fault(addr, esr, regs);
	}
}

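/*
 * Two private fault codes, outside the range used by the generic
 * VM_FAULT_* flags: BADMAP means no VMA covers the faulting address,
 * BADACCESS means a VMA exists but its permissions forbid the access.
 */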
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
				  unsigned int mm_flags, unsigned long vm_flags)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (unlikely(!vma))
		return VM_FAULT_BADMAP;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
	if (unlikely(vma->vm_start > addr)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return VM_FAULT_BADMAP;
		if (expand_stack(vma, addr))
			return VM_FAULT_BADMAP;
	}

	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred.
	 */
	if (!(vma->vm_flags & vm_flags))
		return VM_FAULT_BADACCESS;

	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
}

static bool is_el0_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

/*
 * Note: not valid for EL1 DC IVAC, but we never use that in a way that
 * should fault. EL0 cannot issue DC IVAC (undef).
 */
static bool is_write_abort(unsigned int esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}

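/*
 * Handle a fault that the mm layer may be able to resolve: set up the
 * fault flags from the ESR, take mmap_sem, call __do_page_fault() and
 * retry once if asked to, then translate the result into a signal or a
 * kernel oops.
 */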
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	const struct fault_info *inf;
	struct mm_struct *mm = current->mm;
	vm_fault_t fault, major = 0;
	unsigned long vm_flags = VM_READ | VM_WRITE;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (kprobe_page_fault(regs, esr))
		return 0;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (is_el0_instruction_abort(esr)) {
		vm_flags = VM_EXEC;
		mm_flags |= FAULT_FLAG_INSTRUCTION;
	} else if (is_write_abort(esr)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

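	/*
	 * A permission fault on a user (TTBR0) address taken at EL1 means
	 * the kernel touched user memory it should not have: die, unless
	 * an exception-table fixup shows the access came from a sanctioned
	 * uaccess routine.
	 */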
	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
		if (regs->orig_addr_limit == KERNEL_DS)
			die_kernel_fault("access to user memory with fs=KERNEL_DS",
					 addr, esr, regs);

		if (is_el1_instruction_abort(esr))
			die_kernel_fault("execution of user memory",
					 addr, esr, regs);

		if (!search_exception_tables(regs->pc))
			die_kernel_fault("access to user memory outside uaccess routines",
					 addr, esr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
			up_read(&mm->mmap_sem);
			goto no_context;
		}
#endif
	}

	fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
	major |= fault & VM_FAULT_MAJOR;

	if (fault & VM_FAULT_RETRY) {
		/*
		 * If we need to retry but a fatal signal is pending,
		 * handle the signal first. We do not need to release
		 * the mmap_sem because it would already be released
		 * in __lock_page_or_retry in mm/filemap.c.
		 */
		if (fatal_signal_pending(current)) {
			if (!user_mode(regs))
				goto no_context;
			return 0;
		}

		/*
		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
		 * starvation.
		 */
		if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}
	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" (no error) case first.
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS)))) {
		/*
		 * Major/minor page fault accounting is only done
		 * once. If we go through a retry, it is extremely
		 * likely that the page will be found in page cache at
		 * that point.
		 */
		if (major) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}

		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	inf = esr_to_fault_info(esr);
	set_thread_esr(addr, esr);
	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
				      inf->name);
	} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
		unsigned int lsb;

		lsb = PAGE_SHIFT;
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

		arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
				       inf->name);
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		arm64_force_sig_fault(SIGSEGV,
				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
				      (void __user *)addr,
				      inf->name);
	}

	return 0;

no_context:
	__do_kernel_fault(addr, esr, regs);
	return 0;
}

static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (is_ttbr0_addr(addr))
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	do_bad_area(addr, esr, regs);
	return 0;
}

static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1; /* "fault" */
}

static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	const struct fault_info *inf;
	void __user *siaddr;

	inf = esr_to_fault_info(esr);

	/*
	 * Return value ignored as we rely on signal merging.
	 * Future patches will make this more robust.
	 */
	apei_claim_sea(regs);

	if (esr & ESR_ELx_FnV)
		siaddr = NULL;
	else
		siaddr = (void __user *)addr;
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

	return 0;
}

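/*
 * Indexed by the low six bits of the ESR (the DFSC/IFSC field); each
 * entry chooses the handler plus the signal and si_code delivered if the
 * handler cannot resolve the fault.
 */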
static const struct fault_info fault_info[] = {
	{ do_bad,		SIGKILL, SI_KERNEL,	"ttbr address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 1 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 2 address size fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 3 address size fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault" },
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 8" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 12" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault" },
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault" },
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous external abort" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 17" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 18" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 19" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 (translation table walk)" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 (translation table walk)" },
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous parity or ECC error" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 25" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 26" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 27" },
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 32" },
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 34" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 35" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 36" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 37" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 38" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 39" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 40" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 41" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 42" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 43" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 44" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 45" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 46" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 47" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"TLB conflict abort" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"Unsupported atomic hardware update fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 50" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 51" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  BUS_OBJERR,	"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 54" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 55" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 56" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 57" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 58" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 59" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 60" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"section domain fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"page domain fault" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 63" },
};

asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_fault_info(esr);

	if (!inf->fn(addr, esr, regs))
		return;

	if (!user_mode(regs)) {
		pr_alert("Unhandled fault at 0x%016lx\n", addr);
		mem_abort_decode(esr);
		show_pte(addr);
	}

	arm64_notify_die(inf->name, regs,
			 inf->sig, inf->code, (void __user *)addr, esr);
}

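/*
 * arm64_apply_bp_hardening() invalidates the branch predictor on cores
 * affected by Spectre-v2. The entry points below apply it when an
 * exception taken from EL0 involves a kernel (TTBR1) address, which a
 * well-behaved user task has no reason to generate.
 */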
asmlinkage void __exception do_el0_irq_bp_hardening(void)
{
	/* PC has already been checked in entry.S */
	arm64_apply_bp_hardening();
}

asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
						   unsigned int esr,
						   struct pt_regs *regs)
{
	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(addr))
		arm64_apply_bp_hardening();

	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(addr, esr, regs);
}

asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	if (user_mode(regs)) {
		if (!is_ttbr0_addr(instruction_pointer(regs)))
			arm64_apply_bp_hardening();
		local_daif_restore(DAIF_PROCCTX);
	}

	arm64_notify_die("SP/PC alignment exception", regs,
			 SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
}

int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint" },
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step" },
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint" },
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 3" },
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT" },
	{ do_bad,	SIGKILL,	SI_KERNEL,	"aarch32 vector catch" },
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK" },
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 7" },
};

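/*
 * Used by the hardware breakpoint, watchpoint and single-step code to
 * install their handlers in debug_fault_info[] during boot.
 */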
void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	/*
	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
	 * already disabled to preserve the last enabled/disabled addresses.
	 */
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything. In fact,
		 * if we're a debug exception, we can even interrupt NMI
		 * processing. We don't want this code to make in_nmi()
		 * return true, but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile. Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();

	if (interrupts_enabled(regs))
		trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);

#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return 0;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;

	return 1;
}
#else
static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
					       unsigned int esr,
					       struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_debug_fault_info(esr);
	unsigned long pc = instruction_pointer(regs);

	if (cortex_a76_erratum_1463225_debug_handler(regs))
		return;

	debug_exception_enter(regs);

	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

	if (inf->fn(addr_if_watchpoint, esr, regs)) {
		arm64_notify_die(inf->name, regs,
				 inf->sig, inf->code, (void __user *)pc, esr);
	}

	debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);