// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);

static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

	pgd_val(*pgd) = pgd_val(*pgd_k);
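
	/* Walk the remaining levels of both page tables in lockstep;
	 * a level missing from the reference table means the kernel
	 * mapping genuinely does not exist, so the access is bad.
	 */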
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;
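
	/* code is refined to SEGV_ACCERR below once we know the mapping
	 * exists but the VMA's access permissions forbid this access.
	 */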

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}
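
	/* Classify the access from the hardware exception cause: a store
	 * cache-attribute exception is a write, the ITLB causes are
	 * instruction fetches, and anything else is treated as a read.
	 */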
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
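
	/* With the mmap lock held for reading, find the VMA covering the
	 * faulting address; handle_mm_fault() may later drop the lock and
	 * send us back to the retry label above.
	 */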
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
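
	/* Falling through here means the address sat just below a
	 * VM_GROWSDOWN mapping and expand_stack() grew it to cover
	 * the access.
	 */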

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
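
	/* handle_mm_fault() returns a bitmask of VM_FAULT_* results;
	 * before inspecting it, check whether a fatal signal arrived
	 * while the fault was being serviced.
	 */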
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}