// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
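
/*
 * Note (added for clarity): facility 75 is the access-exception
 * fetch/store-indication facility. With it installed, TEID bits 52-53
 * (mask 0xc00) are valid, and do_exception() below treats the value
 * 0x400 as "store access" and sets FAULT_FLAG_WRITE. Without the
 * facility store_indication stays 0 and that test never matches.
 */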

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}
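
/*
 * For reference: the two low TEID bits tested above encode the address
 * space the CPU was translating through: 0 primary, 1 access register,
 * 2 secondary, 3 home (compare the switch in dump_fault_info() below).
 */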

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
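
/*
 * A fully resolved walk prints a single line of the form
 *   AS:<asce> R3:<entry> S:<entry> P:<entry>
 * while a walk that stops early prints "BAD" when a table entry
 * cannot be read at all.
 */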

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}
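
/*
 * Note on FAULT_FLAG_RETRY_NOWAIT above: when the guest's pfault
 * handshake is enabled, a fault that would have to wait for I/O makes
 * do_exception() return VM_FAULT_PFAULT instead of sleeping, so KVM
 * can inject a pseudo page fault and the guest can run another task
 * while the host pages the memory in (see the pfault comment below).
 */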

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
					(regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);
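
/* Booting with "nopfault" on the command line disables the pfault
 * handshake entirely; pfault_init() then bails out immediately. */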

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};
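
/*
 * refgaddr = __LC_LPP makes the hypervisor return the lowcore LPP
 * field as the token with each pfault interrupt; the kernel keeps the
 * current pid there, which is why pfault_interrupt() below recovers
 * the affected task via "param64 & LPP_PID_MASK".
 */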

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
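
/*
 * Resulting pfault_wait state machine (per task), as handled below:
 *    0 -> 1   initial interrupt first: task is queued on pfault_list
 *             and put to sleep until the completion interrupt arrives
 *    1 -> 0   completion interrupt: task is removed from the list,
 *             woken up and its extra reference dropped
 *    0 -> -1  completion interrupt first: remember it, so the late
 *             initial interrupt does not put the task to sleep
 *   -1 -> 0   late initial interrupt: consume the marker and go on
 */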

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	/*
	 * bit 61 tells us if the address is valid, if it's not we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature so we need to check
	 * for that as well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
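
/*
 * arch_make_page_accessible() asks the ultravisor to export the page
 * from secure storage so the host may touch it again; a nonzero return
 * means the export failed and the page remains inaccessible.
 */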

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */