// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long za_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

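/*
 * Worked example, for illustration (sizes taken from the current uapi
 * headers): __reserved[] in struct sigcontext is 4096 bytes,
 * sizeof(struct _aarch64_ctx) is 8 so TERMINATOR_SIZE is 16, and
 * sizeof(struct extra_context) is 32 so EXTRA_CONTEXT_SIZE is 32.
 * init_user_layout() therefore leaves 4096 - 16 - 32 = 4048 bytes of
 * in-line budget for optional records before __sigframe_alloc() has to
 * spill into an extra_context area.
 */
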
static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

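/*
 * Worked example of the extension path, for illustration (numbers
 * assume the uapi sizes noted above): suppose the 4048-byte in-line
 * budget is nearly exhausted and a multi-kilobyte SVE record no longer
 * fits.  The first extending allocation places the 32-byte
 * extra_context record in-line, reserves TERMINATOR_SIZE bytes for the
 * dummy terminator that follows it in __reserved[], and lifts
 * user->limit to SIGFRAME_MAXSZ - TERMINATOR_SIZE (256 KB less the
 * final terminator), so the SVE record itself then lands in the extra
 * space beyond the base frame.
 */
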
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;

	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	clear_thread_flag(TIF_SVE);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
	struct za_context __user *za;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (test_thread_flag(TIF_SVE)) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

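/*
 * Size arithmetic, for illustration: a vector length (vl) is expressed
 * in bytes while vq counts 128-bit quadwords, so sve_vq_from_vl(vl) is
 * vl / 16.  E.g. with 512-bit vectors, vl = 64 and vq = 4; the payload
 * SVE_SIG_REGS_SIZE(4) then covers 32 Z-regs of 64 bytes, 16 P-regs of
 * 8 bytes, plus FFR - a little over 2 KB before the record size is
 * rounded up to 16 bytes.
 */
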
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	if (sve.flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (sve.vl != vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (sve.flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.za_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct za_context za;

	if (__copy_from_user(&za, user->za, sizeof(za)))
		return -EFAULT;

	if (za.vl != task_get_sme_vl(current))
		return -EINVAL;

	if (za.head.size <= sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(za.vl);

	if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.za_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.za_state */

	sme_alloc(current, true);
	if (!current->thread.za_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.za_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->za = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;
			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;
			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;
			if (user->sve)
				goto invalid;
			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;
			if (user->za)
				goto invalid;
			if (size < sizeof(*user->za))
				goto invalid;

			user->za = (struct za_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;
			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */
			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

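/*
 * For illustration only (userspace view, not part of this file): a
 * signal handler can walk the same record chain that the parser above
 * validates.  The types and magics come from <asm/sigcontext.h>; the
 * helper below is a hedged sketch, not kernel code.
 *
 *	void walk_records(struct ucontext *uc)
 *	{
 *		struct _aarch64_ctx *head =
 *			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *		while (head->magic) {	// zero magic/size terminates
 *			if (head->magic == EXTRA_MAGIC) {
 *				// follow datap into the extra area
 *				struct extra_context *extra = (void *)head;
 *				head = (struct _aarch64_ctx *)
 *					(uintptr_t)extra->datap;
 *				continue;
 *			}
 *			// FPSIMD_MAGIC, SVE_MAGIC, ZA_MAGIC, ESR_MAGIC ...
 *			head = (struct _aarch64_ctx *)
 *				((char *)head + head->size);
 *		}
 *	}
 */
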
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(user.fpsimd);
	}

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
	 * be word aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

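/*
 * Example layout, for illustration: on a CPU without SVE/SME and with
 * no pending fault code, the layout above allocates only the 528-byte
 * fpsimd_context (8-byte header, fpsr/fpcr, 32 x 16-byte V-regs) plus
 * the 16-byte terminator - comfortably inside the 4096-byte
 * __reserved[] area, so no extra_context record is needed.
 */
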
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

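/*
 * Resulting stack picture (stack grows downwards):
 *
 *	sp_top ->	original sp, or signal stack top (sigsp())
 *			[up to 15 bytes of alignment padding]
 *			struct frame_record { fp; lr; }	<- user->next_frame
 *			struct rt_sigframe (siginfo, ucontext, records)
 *	sp ->						<- user->sigframe
 */
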
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK)
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

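/*
 * Userspace counterpart, for illustration: a handler registered with
 * SA_SIGINFO sees the values staged above and in setup_rt_frame()
 * below, i.e. x0 = signal number, x1 = &frame->info, x2 = &frame->uc,
 * and (absent SA_RESTORER) returns through the vDSO sigtramp in x30,
 * which issues rt_sigreturn.  Hedged sketch, not kernel code:
 *
 *	struct sigaction sa = {
 *		.sa_sigaction = handler, // void handler(int, siginfo_t *, void *)
 *		.sa_flags = SA_SIGINFO,
 *	};
 *	sigaction(SIGSEGV, &sa, NULL);
 */
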
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

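/*
 * Worked example of the restart logic above, for illustration: a
 * read() blocked when SIGALRM arrives returns -ERESTARTSYS from the
 * kernel.  If the handler was installed without SA_RESTART, the code
 * above rewrites the return value to -EINTR and resumes at
 * continue_addr, so userspace sees read() fail with EINTR.  With
 * SA_RESTART, x0 stays restored to orig_x0 and the saved PC is left at
 * restart_addr, so the svc instruction re-executes once the handler
 * returns through rt_sigreturn.
 */
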
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

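/*
 * Userspace consumes this via the auxiliary vector; for illustration
 * (standard libc APIs, not kernel code):
 *
 *	#include <sys/auxv.h>
 *
 *	size_t minsz = getauxval(AT_MINSIGSTKSZ); // may exceed SIGSTKSZ
 *	stack_t ss = {
 *		.ss_size = minsz > SIGSTKSZ ? minsz : SIGSTKSZ,
 *	};
 *	ss.ss_sp = malloc(ss.ss_size);
 *	sigaltstack(&ss, NULL);
 */
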
/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);