1 /* SPDX-License-Identifier: GPL-2.0 */
3 * S390 low-level entry points.
5 * Copyright IBM Corp. 1999, 2012
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
11 #include <linux/init.h>
12 #include <linux/linkage.h>
13 #include <asm/asm-extable.h>
14 #include <asm/alternative-asm.h>
15 #include <asm/processor.h>
16 #include <asm/cache.h>
17 #include <asm/dwarf.h>
18 #include <asm/errno.h>
19 #include <asm/ptrace.h>
20 #include <asm/thread_info.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/unistd.h>
26 #include <asm/vx-insn.h>
27 #include <asm/setup.h>
29 #include <asm/export.h>
30 #include <asm/nospec-insn.h>
/*
 * Kernel stack geometry and lowcore helper macros.
 * NOTE(review): this view of the file is sampled -- the .macro/.endm
 * framing for the first two ALTERNATIVE bodies (STBEAR/LBEAR-style
 * helpers, judging by the \address argument) is not visible here.
 */
32 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
33 STACK_SIZE = 1 << STACK_SHIFT
34 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
36 _LPP_OFFSET = __LC_LPP
/* patched to the real instruction only when facility 193 is installed */
39 ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
43 ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
/* LPSWEY: without facility 193 fall back to branching to the plain \lpswe */
46 .macro LPSWEY address,lpswe
47 ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
/* copy the last-breaking-event address from lowcore into pt_regs (f193) */
51 ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
/* CHECK_STACK: test whether %r15 has run into the configured stack guard */
54 .macro CHECK_STACK savearea
55 #ifdef CONFIG_CHECK_STACK
56 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
/* NOTE(review): the branch taken on a guard hit and the .endm are not
 * visible in this sampled view */
/*
 * CHECK_VMAP_STACK: round %r14 down to a stack base and compare it
 * against each known per-cpu stack in lowcore; the conditional jumps
 * to \oklabel / the overflow path are among the missing lines.
 */
62 .macro CHECK_VMAP_STACK savearea,oklabel
63 #ifdef CONFIG_VMAP_STACK
65 nill %r14,0x10000 - STACK_SIZE
67 clg %r14,__LC_KERNEL_STACK
69 clg %r14,__LC_ASYNC_STACK
71 clg %r14,__LC_MCCK_STACK
73 clg %r14,__LC_NODAT_STACK
75 clg %r14,__LC_RESTART_STACK
85 * The TSTMSK macro generates a test-under-mask instruction by
86 * calculating the memory offset for the specified mask value.
87 * Mask value can be any constant. The macro shifts the mask
88 * value to calculate the memory offset for the test-under-mask
91 .macro TSTMSK addr, mask, size=8, bytepos=0
92 .if (\bytepos < \size) && (\mask >> 8)
/* mask wider than one byte: recurse with the mask shifted one byte down
 * and the byte position advanced.
 * NOTE(review): the .if/.else arms separating the two .error lines from
 * the recursion are missing in this sampled view. */
94 .error "Mask exceeds byte boundary"
96 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
100 .error "Mask must not be zero"
/* off = offset (from \addr) of the single byte the final tm will test */
102 off = \size - \bytepos - 1
/*
 * Branch-prediction isolation helpers (Spectre mitigation, patched in
 * under facility 82). The two bare ALTERNATIVE lines are presumably the
 * bodies of BPOFF/BPON-style macros whose .macro lines are not visible
 * in this sampled view; opcode 0xb2e80000 with m3=12/13 selects the two
 * prediction-control variants -- TODO confirm against the ISA docs.
 */
107 ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
111 ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
/* BPENTER: on kernel entry, issue the m3=13 variant unless one of the
 * \tif_mask flags is set (then the patched-out path just skips it) */
114 .macro BPENTER tif_ptr,tif_mask
115 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
116 "j .+12; nop; nop", 82
/* BPEXIT: on kernel exit, pick the m3=12 or m3=13 variant depending on
 * the \tif_mask flag state */
119 .macro BPEXIT tif_ptr,tif_mask
120 TSTMSK \tif_ptr,\tif_mask
121 ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
122 "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
126 * The CHKSTG macro jumps to the provided label in case the
127 * machine check interruption code reports one of unrecoverable
129 * - Storage error uncorrected
130 * - Storage key error uncorrected
131 * - Storage degradation with Failing-storage-address validity
133 .macro CHKSTG errlabel
/* test the MCCK interruption code in lowcore for each fatal condition;
 * NOTE(review): the conditional branches to \errlabel between these
 * TSTMSKs (and the .endm) are missing from this sampled view */
134 TSTMSK __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
136 TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
138 TSTMSK __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
143 #if IS_ENABLED(CONFIG_KVM)
145 * The OUTSIDE macro jumps to the provided label in case the value
146 * in the provided register is outside of the provided range. The
147 * macro is useful for checking whether a PSW stored in a register
148 * pair points inside or outside of a block of instructions.
149 * @reg: register to check
150 * @start: start of the range
151 * @end: end of the range
152 * @outside_label: jump here if @reg is outside of [@start..@end)
154 .macro OUTSIDE reg,start,end,outside_label
/* LLVM's integrated assembler cannot fold \end - \start into an
 * immediate here, so the range size is read from a rodata literal */
158 #ifdef CONFIG_AS_IS_LLVM
159 clgfrl %r14,.Lrange_size\@
161 clgfi %r14,\end - \start
164 #ifdef CONFIG_AS_IS_LLVM
165 .section .rodata, "a"
/*
 * SIE exit cleanup (the enclosing macro header, likely SIEEXIT, is not
 * visible in this sampled view): clear the in-SIE bit in the control
 * block, restore the kernel address space and aim %r9 at sie_exit.
 */
174 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
175 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
176 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
177 larl %r9,sie_exit # skip forward to sie_exit
183 .section .kprobes.text, "ax"
186 * This nop exists only in order to avoid that __bpon starts at
187 * the beginning of the kprobes text section. In that case we would
188 * have several symbols at the same address. E.g. objdump would take
189 * an arbitrary symbol name when disassembling this code.
190 * With the added nop in between the __bpon symbol is unique
202 * Scheduler resume function, called by switch_to
203 * gpr2 = (task_struct *) prev
204 * gpr3 = (task_struct *) next
/* __switch_to body (its ENTRY line is not visible in this sampled view):
 * save prev's gprs/ksp, swap lowcore current/kernel-stack to next, then
 * restore next's gprs from its saved stack frame. */
209 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
210 lghi %r4,__TASK_stack
211 lghi %r1,__TASK_thread
/* NOTE(review): the instruction loading %r5 (used below as the offset to
 * the stack end, presumably STACK_INIT) is among the missing lines */
213 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
214 lg %r15,0(%r4,%r3) # start of kernel stack of next
215 agr %r15,%r5 # end of kernel stack of next
216 stg %r3,__LC_CURRENT # store task struct of next
217 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
218 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
220 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
221 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
/* set the Load-Program-Parameter when facility 40 is installed */
222 ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
111 /* placeholder kept empty -- see comments inline below */
302 * SVC interrupt handler routine. System calls are synchronous events and
303 * are entered with interrupts disabled.
/* system_call body (its ENTRY line is not visible in this sampled view):
 * save user state into lowcore + pt_regs, run __do_syscall in C, then
 * restore and return to user with LPSWEY. */
307 stpt __LC_SYS_ENTER_TIMER
308 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
312 STBEAR __LC_LAST_BREAK
313 lctlg %c1,%c1,__LC_KERNEL_ASCE
315 lg %r15,__LC_KERNEL_STACK
316 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
317 stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
318 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
319 # clear user controlled register to prevent speculative use
/* NOTE(review): the register-clearing xgr sequence referred to above is
 * among the lines missing from this sampled view */
330 la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
331 mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
332 /* call the C system call dispatcher with %r2 = pt_regs */
334 brasl %r14,__do_syscall
335 lctlg %c1,%c1,__LC_USER_ASCE
336 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
337 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
338 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
339 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
341 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
345 # a new process exits the kernel with ret_from_fork
/* ret_from_fork (ENTRY line not visible here): finish the child's first
 * schedule in C, then take the same user-return path as system_call */
349 brasl %r14,__ret_from_fork
350 lctlg %c1,%c1,__LC_USER_ASCE
351 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
352 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
353 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
354 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
356 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
357 ENDPROC(ret_from_fork)
360 * Program check handler routine
363 ENTRY(pgm_check_handler)
364 stpt __LC_SYS_ENTER_TIMER
366 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
/* %r8/%r9 = old PSW of the faulting context */
369 lmg %r8,%r9,__LC_PGM_OLD_PSW
370 tmhh %r8,0x0001 # coming from user space?
372 lctlg %c1,%c1,__LC_KERNEL_ASCE
373 j 3f # -> fault in user space
375 #if IS_ENABLED(CONFIG_KVM)
376 # cleanup critical section for program checks in sie64a
377 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
379 lghi %r10,_PIF_GUEST_FAULT
381 1: tmhh %r8,0x4000 # PER bit set in old PSW ?
382 jnz 2f # -> enabled, can't be a double fault
383 tm __LC_PGM_ILC+3,0x80 # check for per exception
384 jnz .Lpgm_svcper # -> single stepped svc
385 2: CHECK_STACK __LC_SAVE_AREA_SYNC
386 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
387 # CHECK_VMAP_STACK branches to stack_overflow or 4f
388 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
389 3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
390 lg %r15,__LC_KERNEL_STACK
/* build pt_regs on the (kernel) stack: gprs, PSW, last-break, flags */
391 4: la %r11,STACK_FRAME_OVERHEAD(%r15)
392 stg %r10,__PT_FLAGS(%r11)
393 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
394 stmg %r0,%r7,__PT_R0(%r11)
395 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
396 mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
397 stmg %r8,%r9,__PT_PSW(%r11)
399 # clear user controlled registers to prevent speculative use
/* NOTE(review): the xgr-clearing sequence and the load of %r2 before the
 * C call are among the lines missing from this sampled view */
408 brasl %r14,__do_pgm_check
409 tmhh %r8,0x0001 # returning to user space?
410 jno .Lpgm_exit_kernel
411 lctlg %c1,%c1,__LC_USER_ASCE
412 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
415 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
416 LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
417 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
418 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
421 # single stepped system call
/* redirect the PER-trapped svc to the syscall path via the return PSW */
424 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
426 stg %r14,__LC_RETURN_PSW+8
428 LBEAR __LC_PGM_LAST_BREAK
429 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
430 ENDPROC(pgm_check_handler)
433 * Interrupt handler macro used for external and IO interrupts.
/* Expanded twice below; \lc_old_psw selects which old PSW to read and
 * \handler the C routine called with %r2 = pt_regs. */
435 .macro INT_HANDLER name,lc_old_psw,handler
438 stpt __LC_SYS_ENTER_TIMER
439 STBEAR __LC_LAST_BREAK
441 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
443 lmg %r8,%r9,\lc_old_psw
444 tmhh %r8,0x0001 # interrupting from user ?
446 #if IS_ENABLED(CONFIG_KVM)
/* interrupted inside the SIE critical section -> clean up guest state */
447 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
448 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
451 0: CHECK_STACK __LC_SAVE_AREA_ASYNC
452 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
454 1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
455 lctlg %c1,%c1,__LC_KERNEL_ASCE
456 lg %r15,__LC_KERNEL_STACK
457 2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
458 la %r11,STACK_FRAME_OVERHEAD(%r15)
459 stmg %r0,%r7,__PT_R0(%r11)
460 # clear user controlled registers to prevent speculative use
/* NOTE(review): the xgr-clearing lines and the brasl to \handler are
 * among the lines missing from this sampled view; the label 2: below
 * duplicating the one above also suggests missing context */
469 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
470 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
472 stmg %r8,%r9,__PT_PSW(%r11)
473 lgr %r2,%r11 # pass pointer to pt_regs
475 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
476 tmhh %r8,0x0001 # returning to user ?
478 lctlg %c1,%c1,__LC_USER_ASCE
479 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
481 2: LBEAR __PT_LAST_BREAK(%r11)
482 lmg %r0,%r15,__PT_R0(%r11)
483 LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
/* instantiate the two asynchronous interrupt entry points */
487 INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
488 INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
/*
 * CPU idle entry (the ENTRY(psw_idle) line and its argument description
 * are not visible in this sampled view). Saves the resume address
 * (psw_idle_exit) into the idle PSW slot, records multithread cycle /
 * clock / timer entry values in the data area at %r2, flags the CPU as
 * in enabled wait, and loads the wait PSW.
 */
494 stg %r14,(__SF_GPRS+8*8)(%r15)
495 stg %r3,__SF_EMPTY(%r15)
496 larl %r1,psw_idle_exit
497 stg %r1,__SF_EMPTY+8(%r15)
498 larl %r1,smp_cpu_mtid
/* stcctm: store MT cycle counters -- TODO confirm opcode mnemonic */
502 .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
504 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
506 stckf __CLOCK_IDLE_ENTER(%r2)
507 stpt __TIMER_IDLE_ENTER(%r2)
508 lpswe __SF_EMPTY(%r15)
515 * Machine check handler routines
517 ENTRY(mcck_int_handler)
518 stckf __LC_MCCK_CLOCK
/* revalidate state from the 4K-offset lowcore save areas; %r1 = 4095 so
 * the -4095 displacements address the save areas directly */
520 la %r1,4095 # validate r1
521 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
522 LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
523 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
525 lmg %r8,%r9,__LC_MCK_OLD_PSW
526 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
527 jo .Lmcck_panic # yes -> rest of mcck code invalid
528 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
529 jno .Lmcck_panic # control registers invalid -> panic
/* NOTE(review): the setup of %r14 used here is among the missing lines */
531 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
/* pick the most plausible enter-timer value if the cpu timer is invalid */
533 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
534 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
535 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
537 la %r14,__LC_SYS_ENTER_TIMER
538 clc 0(8,%r14),__LC_EXIT_TIMER
540 la %r14,__LC_EXIT_TIMER
541 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
543 la %r14,__LC_LAST_UPDATE_TIMER
545 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
546 3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
548 tmhh %r8,0x0001 # interrupting from user ?
550 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
552 #if IS_ENABLED(CONFIG_KVM)
/* machine check hit inside SIE: flag it as a guest mcck and clean up */
553 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
554 OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f
555 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
557 4: CHKSTG .Lmcck_panic
558 5: larl %r14,.Lstosm_tmp
559 stosm 0(%r14),0x04 # turn dat on, keep irqs off
560 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
564 6: CHKSTG .Lmcck_panic
565 larl %r14,.Lstosm_tmp
566 stosm 0(%r14),0x04 # turn dat on, keep irqs off
567 tmhh %r8,0x0001 # interrupting from user ?
569 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
/* build pt_regs on the dedicated machine-check stack */
571 lg %r15,__LC_MCCK_STACK
572 la %r11,STACK_FRAME_OVERHEAD(%r15)
573 stctg %c1,%c1,__PT_CR1(%r11)
574 lctlg %c1,%c1,__LC_KERNEL_ASCE
575 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
576 lghi %r14,__LC_GPREGS_SAVE_AREA+64
577 stmg %r0,%r7,__PT_R0(%r11)
578 # clear user controlled registers to prevent speculative use
587 mvc __PT_R8(64,%r11),0(%r14)
588 stmg %r8,%r9,__PT_PSW(%r11)
589 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
590 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
591 lgr %r2,%r11 # pass pointer to pt_regs
592 brasl %r14,s390_do_machine_check
/* copy pt_regs over to the kernel stack before running the second-stage
 * handler there */
595 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
596 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
597 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
598 la %r11,STACK_FRAME_OVERHEAD(%r1)
601 brasl %r14,s390_handle_mcck
603 lctlg %c1,%c1,__PT_CR1(%r11)
604 lmg %r0,%r10,__PT_R0(%r11)
605 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
606 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
608 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
610 0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
612 lmg %r11,%r15,__PT_R11(%r11)
613 LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
/* .Lmcck_panic path (label itself not visible in this sampled view): */
617 * Iterate over all possible CPU addresses in the range 0..0xffff
618 * and stop each CPU using signal processor. Use compare and swap
619 * to allow just one CPU-stopper and prevent concurrent CPUs from
620 * stopping each other while leaving the others running.
625 cs %r5,%r6,0(%r7) # single CPU-stopper only
628 stap 0(%r7) # this CPU address
632 sll %r0,16 # CPU counter
633 lhi %r3,0 # next CPU address
636 1: sigp %r1,%r3,SIGP_STOP # stop next CPU
640 3: sigp %r1,%r4,SIGP_STOP # stop this CPU
643 ENDPROC(mcck_int_handler)
/*
 * Restart interrupt: run the function stored in lowcore
 * (__LC_RESTART_FN) on the restart stack, optionally stopping the
 * requesting source CPU first, then stop this CPU.
 */
645 ENTRY(restart_int_handler)
646 ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
647 stg %r15,__LC_SAVE_AREA_RESTART
648 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
/* NOTE(review): the branch guarding this lctlg and the %r15=4095 setup
 * implied by the -4095 displacement are among the missing lines */
651 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
652 0: larl %r15,.Lstosm_tmp
653 stosm 0(%r15),0x04 # turn dat on, keep irqs off
/* build a pt_regs snapshot on the restart stack */
654 lg %r15,__LC_RESTART_STACK
655 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
656 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
657 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
658 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
659 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
660 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
661 lg %r2,__LC_RESTART_DATA
662 lgf %r3,__LC_RESTART_SOURCE
663 ltgr %r3,%r3 # test source cpu address
664 jm 1f # negative -> skip source stop
665 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
666 brc 10,0b # wait for status stored
667 1: basr %r14,%r1 # call function
668 stap __SF_EMPTY(%r15) # store cpu address
669 llgh %r3,__SF_EMPTY(%r15)
670 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
673 ENDPROC(restart_int_handler)
675 .section .kprobes.text, "ax"
677 #if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
679 * The synchronous or the asynchronous stack overflowed. We are dead.
680 * No need to properly save the registers, we are going to panic anyway.
681 * Setup a pt_regs so that show_trace can provide a good call trace.
683 ENTRY(stack_overflow)
684 lg %r15,__LC_NODAT_STACK # change to panic stack
685 la %r11,STACK_FRAME_OVERHEAD(%r15)
686 stmg %r0,%r7,__PT_R0(%r11)
687 stmg %r8,%r9,__PT_PSW(%r11)
/* %r14 is expected to point at the relevant lowcore save area here --
 * set up by the CHECK_* macro that jumped in; not visible in this view */
688 mvc __PT_R8(64,%r11),0(%r14)
689 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
690 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
691 lgr %r2,%r11 # pass pointer to pt_regs
/* tail-call, never returns */
692 jg kernel_stack_overflow
693 ENDPROC(stack_overflow)
/*
 * System call tables: syscall_table.h expands the SYSCALL() macro once
 * per syscall; first expansion emits the 64-bit native table, second
 * (redefined macro) the compat/emulation table. The #undef between the
 * two #defines is among the lines missing from this sampled view.
 */
701 .section .rodata, "a"
702 #define SYSCALL(esame,emu) .quad __s390x_ ## esame
703 .globl sys_call_table
705 #include "asm/syscall_table.h"
710 #define SYSCALL(esame,emu) .quad __s390_ ## emu
711 .globl sys_call_table_emu
713 #include "asm/syscall_table.h"