/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/export.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
__PT_R2 = __PT_GPRS + 16
__PT_R3 = __PT_GPRS + 24
__PT_R4 = __PT_GPRS + 32
__PT_R5 = __PT_GPRS + 40
__PT_R6 = __PT_GPRS + 48
__PT_R7 = __PT_GPRS + 56
__PT_R8 = __PT_GPRS + 64
__PT_R9 = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
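# Example (illustrative, assuming the usual 64-bit values PAGE_SHIFT = 12 and
# THREAD_SIZE_ORDER = 2): STACK_SHIFT is 14, STACK_SIZE is 16KB, and
# STACK_INIT points just below the pt_regs area reserved at the top of each
# kernel stack.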
_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
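# The three work groups above are checked on the return paths below:
# _TIF_* bits live in the thread_info flags of the current task (__TI_flags),
# _CIF_* bits in the per-cpu flags in the lowcore (__LC_CPU_FLAGS), and
# _PIF_* bits in the flags word of the current pt_regs (__PT_FLAGS).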
#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
	.endm
	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK 1<<STACK_SHIFT,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	UPDATE_VTIME %r14,%r15,\timer
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
	.macro	UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
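# The s390 CPU timer counts down, so the subtractions above yield elapsed
# times: \w1 (exit timer - enter timer) is the time spent in user space and
# is added to __LC_USER_TIMER, \w2 (last update - exit timer) is the time
# spent in the kernel and is added to __LC_SYSTEM_TIMER.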
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf

	.macro	STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm
/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
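# Example (illustrative): TSTMSK __LC_CPU_FLAGS,_CIF_FPU expands to a single
# test-under-mask on the byte of __LC_CPU_FLAGS that contains the _CIF_FPU
# bit, i.e. something like
#	tm	__LC_CPU_FLAGS+7,<mask byte>
# which matches the "oi __LC_CPU_FLAGS+7,_CIF_FPU" updates used elsewhere in
# this file.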
	.section .kprobes.text, "ax"

/*
 * This nop exists only in order to avoid that __switch_to starts at
 * the beginning of the kprobes text section. In that case we would
 * have several symbols at the same address. E.g. objdump would take
 * an arbitrary symbol name when disassembling this code.
 * With the added nop in between the __switch_to symbol is unique
 * again.
 */
	nop	0
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	aghi	%r1,__TASK_thread		# thread_struct of prev task
	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
	aghi	%r1,__TASK_thread		# thread_struct of next task
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	.insn	s,0xb2800000,__LC_LPP		# set program parameter
#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
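# A sketch of the landing pad layout described above (assumed; the pad
# instructions themselves are not shown here):
#	.Lrewind_pad6:	nop	# PSW rewound by 6 bytes lands here
#	.Lrewind_pad4:	nop	# PSW rewound by 4 bytes lands here
#	.Lrewind_pad2:	nop	# PSW rewound by 2 bytes lands here
# sie_exit follows the pads, and each pad has an EX_TABLE entry pointing to
# .Lsie_fault (see the EX_TABLE list below).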
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */
ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	sth	%r1,__PT_INT_CODE+2(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	basr	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
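# Summary of the exit sequence above: once no work bits remain set, the return
# PSW is built from the saved pt_regs PSW, the exit time stamp is copied into
# the vdso per-cpu ECTG area, the general registers are reloaded from pt_regs,
# and lpswe atomically restores PSW mask and address to resume user space.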
# One of the work bits is on. Find out which one.
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	j	.Lsysc_return		# beware of critical section cleanup
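# Note the ordering above: a pending machine check is handled before anything
# else, live patching is handled just before signal delivery and a possible
# syscall restart, and the lazy FPU restore and ASCE updates come last, right
# before the final jump back to .Lsysc_return.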
# _TIF_NEED_RESCHED is set, call schedule
	larl	%r14,.Lsysc_return

# _CIF_MCCK_PENDING is set, call handler
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r14,.Lsysc_return

# CIF_FPU is set, restore floating-point controls and floating-point registers.
	larl	%r14,.Lsysc_return
# _TIF_SIGPENDING is set, call do_signal
	lgr	%r2,%r11		# pass pointer to pt_regs
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS

# _TIF_NOTIFY_RESUME is set, call do_notify_resume
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return

# _TIF_UPROBE is set, call uprobe_notify_resume
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume

# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return

# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
# _PIF_PER_TRAP is set, call do_per_trap
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return

# _PIF_SYSCALL_RESTART is set, repeat the current system call
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)

# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
	lgr	%r2,%r11		# pass pointer to pt_regs
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit

# a new process exits the kernel with ret_from_fork
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	brasl	%r14,schedule_tail
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
/*
 * Program check handler routine
 */
ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	lg	%r15,__LC_KERNEL_STACK
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	stg	%r10,__THREAD_last_break(%r14)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	tmhh	%r8,0x0001		# kernel per event ?
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	lgf	%r1,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL

# PER event in supervisor state, must be kprobes
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap

# single stepped system call
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
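# Single-stepped svc: a PER event was recognized on the svc instruction. The
# return PSW is assembled from the SVC new PSW mask and an address inside the
# system call entry path (see "branch to .Lsysc_per" above), %r14 carries
# _PIF_SYSCALL | _PIF_PER_TRAP for the pt_regs flags word, and lpswe re-enters
# the system call handler with interrupts enabled.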
/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	lghi	%r3,THIN_INTERRUPT
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	brasl	%r14,preempt_schedule_irq

# Need to do work before returning to userspace, switch to kernel stack
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
# One of the work bits is on. Find out which one.
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	j	.Lio_return		# beware of critical section cleanup

# _CIF_MCCK_PENDING is set, call handler
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r14,.Lio_return

# CIF_FPU is set, restore floating-point controls and floating-point registers.
	larl	%r14,.Lio_return

# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts

# _TIF_NEED_RESCHED is set, call schedule
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts

# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#ifdef CONFIG_LIVEPATCH
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state

# _TIF_SIGPENDING is set, call do_signal
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts

# _TIF_NOTIFY_RESUME is set, call do_notify_resume
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
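# The ssm pairs above rely on two prebuilt PSW masks in the lowcore: the SVC
# new PSW mask has external and I/O interrupts enabled, while the program-check
# new PSW mask has them disabled, so "ssm __LC_SVC_NEW_PSW" re-enables
# interrupts around the C calls and "ssm __LC_PGM_NEW_PSW" disables them again
# before returning to .Lio_return.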
/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
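# psw_idle stores the clock and the CPU timer at __CLOCK_IDLE_ENTER and
# __TIMER_IDLE_ENTER before loading the wait PSW; .Lcleanup_idle (below) uses
# these values to account the time spent in the enabled wait as idle and
# steal time once the wait is interrupted.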
/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical section
 * cleanup assures that the registers are stored even if interrupted for
 * some other work. The CIF_FPU flag is set to trigger a lazy restore
 * of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
EXPORT_SYMBOL(save_fpu_regs)
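# Lazy FPU handling: save_fpu_regs stores the current FP/VX state into the
# thread_struct and sets CIF_FPU; while CIF_FPU is set the kernel may clobber
# the registers, and load_fpu_regs (below) reloads them and clears CIF_FPU
# before the return to user space.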
/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 */
load_fpu_regs:
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # validate gprs
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lghi	%r14,__LC_FPREGS_SAVE_AREA
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	la	%r14,__LC_LAST_UPDATE_TIMER
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	tmhh	%r8,0x0001		# interrupting from user ?
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
4:	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	brasl	%r14,s390_handle_mcck
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW
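# Machine check return path: the return PSW is staged in __LC_RETURN_MCCK_PSW
# rather than __LC_RETURN_PSW, and the exit timer is only updated when the
# interrupted context was user space; if one of the validity checks above
# failed, .Lmcck_panic below switches to the panic stack instead.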
.Lmcck_panic:
	lg	%r15,__LC_PANIC_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
# PSW restart interrupt handler
ENTRY(restart_int_handler)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	.insn	s,0xb2800000,__LC_LPP
0:	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
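# Restart flow: the function pointer, its argument and the requesting CPU are
# loaded from __LC_RESTART_FN/_DATA/_SOURCE; if a source CPU is given, sigp
# sense is retried until its status is stored, then the function is called on
# the restart stack and the current CPU finally stops itself with SIGP_STOP.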
	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs

	.quad	.Lsysc_restore
	.quad	.Lpsw_idle_end
	.quad	.Lsave_fpu_regs_end
	.quad	.Lload_fpu_regs_end
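# cleanup_critical is entered with %r9 holding the address at which a critical
# section was interrupted. The compare-and-branch chain above uses pairs of
# addresses (the start and end of each critical region, kept in
# .Lcleanup_table) to decide which .Lcleanup_* routine has to re-run or
# complete the interrupted sequence before the interrupt is handled.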
#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# Is this in normal interrupt?
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
1:	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	# set up saved register r12 task struct pointer
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# set up saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
.Lcleanup_system_call_insn:
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4
.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
	clg	%r9,BASED(.Lcleanup_idle_insn)
	larl	%r1,smp_cpu_mtid
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs

.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
	.section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"

#ifdef CONFIG_COMPAT
#define SYSCALL(esame,emu) .long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
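# The system call table is generated by including syscalls.S twice: once with
# SYSCALL() selecting the 64-bit entry points for sys_call_table and, under
# CONFIG_COMPAT, once more selecting the compat entry points for
# sys_call_table_emu.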