/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
26 extern const unsigned long sigreturn_codes[7];
28 static unsigned long signal_return_offset;
31 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
33 char kbuf[sizeof(*frame) + 8];
34 struct crunch_sigframe *kframe;
36 /* the crunch context must be 64 bit aligned */
37 kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
38 kframe->magic = CRUNCH_MAGIC;
39 kframe->size = CRUNCH_STORAGE_SIZE;
40 crunch_task_copy(current_thread_info(), &kframe->storage);
41 return __copy_to_user(frame, kframe, sizeof(*frame));
44 static int restore_crunch_context(char __user **auxp)
46 struct crunch_sigframe __user *frame =
47 (struct crunch_sigframe __user *)*auxp;
48 char kbuf[sizeof(*frame) + 8];
49 struct crunch_sigframe *kframe;
51 /* the crunch context must be 64 bit aligned */
52 kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
53 if (__copy_from_user(kframe, frame, sizeof(*frame)))
55 if (kframe->magic != CRUNCH_MAGIC ||
56 kframe->size != CRUNCH_STORAGE_SIZE)
58 *auxp += CRUNCH_STORAGE_SIZE;
59 crunch_task_restore(current_thread_info(), &kframe->storage);
66 static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
68 char kbuf[sizeof(*frame) + 8];
69 struct iwmmxt_sigframe *kframe;
72 /* the iWMMXt context must be 64 bit aligned */
73 kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
75 if (test_thread_flag(TIF_USING_IWMMXT)) {
76 kframe->magic = IWMMXT_MAGIC;
77 kframe->size = IWMMXT_STORAGE_SIZE;
78 iwmmxt_task_copy(current_thread_info(), &kframe->storage);
81 * For bug-compatibility with older kernels, some space
82 * has to be reserved for iWMMXt even if it's not used.
83 * Set the magic and size appropriately so that properly
84 * written userspace can skip it reliably:
86 *kframe = (struct iwmmxt_sigframe) {
88 .size = IWMMXT_STORAGE_SIZE,
92 err = __copy_to_user(frame, kframe, sizeof(*kframe));
97 static int restore_iwmmxt_context(char __user **auxp)
99 struct iwmmxt_sigframe __user *frame =
100 (struct iwmmxt_sigframe __user *)*auxp;
101 char kbuf[sizeof(*frame) + 8];
102 struct iwmmxt_sigframe *kframe;
104 /* the iWMMXt context must be 64 bit aligned */
105 kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
106 if (__copy_from_user(kframe, frame, sizeof(*frame)))
110 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
111 * block is discarded for compatibility with setup_sigframe() if
112 * present, but we don't mandate its presence. If some other
113 * magic is here, it's not for us:
115 if (!test_thread_flag(TIF_USING_IWMMXT) &&
116 kframe->magic != DUMMY_MAGIC)
119 if (kframe->size != IWMMXT_STORAGE_SIZE)
122 if (test_thread_flag(TIF_USING_IWMMXT)) {
123 if (kframe->magic != IWMMXT_MAGIC)
126 iwmmxt_task_restore(current_thread_info(), &kframe->storage);
129 *auxp += IWMMXT_STORAGE_SIZE;
137 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
139 struct vfp_sigframe kframe;
142 memset(&kframe, 0, sizeof(kframe));
143 kframe.magic = VFP_MAGIC;
144 kframe.size = VFP_STORAGE_SIZE;
146 err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
150 return __copy_to_user(frame, &kframe, sizeof(kframe));
153 static int restore_vfp_context(char __user **auxp)
155 struct vfp_sigframe frame;
158 err = __copy_from_user(&frame, *auxp, sizeof(frame));
162 if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
165 *auxp += sizeof(frame);
166 return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
172 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
176 unsigned long retcode[2];
184 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
186 struct sigcontext context;
191 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
193 set_current_blocked(&set);
195 err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
197 regs->ARM_r0 = context.arm_r0;
198 regs->ARM_r1 = context.arm_r1;
199 regs->ARM_r2 = context.arm_r2;
200 regs->ARM_r3 = context.arm_r3;
201 regs->ARM_r4 = context.arm_r4;
202 regs->ARM_r5 = context.arm_r5;
203 regs->ARM_r6 = context.arm_r6;
204 regs->ARM_r7 = context.arm_r7;
205 regs->ARM_r8 = context.arm_r8;
206 regs->ARM_r9 = context.arm_r9;
207 regs->ARM_r10 = context.arm_r10;
208 regs->ARM_fp = context.arm_fp;
209 regs->ARM_ip = context.arm_ip;
210 regs->ARM_sp = context.arm_sp;
211 regs->ARM_lr = context.arm_lr;
212 regs->ARM_pc = context.arm_pc;
213 regs->ARM_cpsr = context.arm_cpsr;
216 err |= !valid_user_regs(regs);
218 aux = (char __user *) sf->uc.uc_regspace;
221 err |= restore_crunch_context(&aux);
225 err |= restore_iwmmxt_context(&aux);
229 err |= restore_vfp_context(&aux);
235 asmlinkage int sys_sigreturn(struct pt_regs *regs)
237 struct sigframe __user *frame;
239 /* Always make any pending restarted system calls return -EINTR */
240 current->restart_block.fn = do_no_restart_syscall;
243 * Since we stacked the signal on a 64-bit boundary,
244 * then 'sp' should be word aligned here. If it's
245 * not, then the user is trying to mess with us.
247 if (regs->ARM_sp & 7)
250 frame = (struct sigframe __user *)regs->ARM_sp;
252 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
255 if (restore_sigframe(regs, frame))
261 force_sig(SIGSEGV, current);
265 asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
267 struct rt_sigframe __user *frame;
269 /* Always make any pending restarted system calls return -EINTR */
270 current->restart_block.fn = do_no_restart_syscall;
273 * Since we stacked the signal on a 64-bit boundary,
274 * then 'sp' should be word aligned here. If it's
275 * not, then the user is trying to mess with us.
277 if (regs->ARM_sp & 7)
280 frame = (struct rt_sigframe __user *)regs->ARM_sp;
282 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
285 if (restore_sigframe(regs, &frame->sig))
288 if (restore_altstack(&frame->sig.uc.uc_stack))
294 force_sig(SIGSEGV, current);
299 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
301 struct aux_sigframe __user *aux;
302 struct sigcontext context;
305 context = (struct sigcontext) {
306 .arm_r0 = regs->ARM_r0,
307 .arm_r1 = regs->ARM_r1,
308 .arm_r2 = regs->ARM_r2,
309 .arm_r3 = regs->ARM_r3,
310 .arm_r4 = regs->ARM_r4,
311 .arm_r5 = regs->ARM_r5,
312 .arm_r6 = regs->ARM_r6,
313 .arm_r7 = regs->ARM_r7,
314 .arm_r8 = regs->ARM_r8,
315 .arm_r9 = regs->ARM_r9,
316 .arm_r10 = regs->ARM_r10,
317 .arm_fp = regs->ARM_fp,
318 .arm_ip = regs->ARM_ip,
319 .arm_sp = regs->ARM_sp,
320 .arm_lr = regs->ARM_lr,
321 .arm_pc = regs->ARM_pc,
322 .arm_cpsr = regs->ARM_cpsr,
324 .trap_no = current->thread.trap_no,
325 .error_code = current->thread.error_code,
326 .fault_address = current->thread.address,
327 .oldmask = set->sig[0],
330 err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
332 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
334 aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
337 err |= preserve_crunch_context(&aux->crunch);
341 err |= preserve_iwmmxt_context(&aux->iwmmxt);
345 err |= preserve_vfp_context(&aux->vfp);
347 err |= __put_user(0, &aux->end_magic);
352 static inline void __user *
353 get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
355 unsigned long sp = sigsp(regs->ARM_sp, ksig);
359 * ATPCS B01 mandates 8-byte alignment
361 frame = (void __user *)((sp - framesize) & ~7);
364 * Check that we can actually write to the signal frame.
366 if (!access_ok(VERIFY_WRITE, frame, framesize))
373 setup_return(struct pt_regs *regs, struct ksignal *ksig,
374 unsigned long __user *rc, void __user *frame)
376 unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
377 unsigned long retcode;
379 unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
381 cpsr |= PSR_ENDSTATE;
384 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
386 if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
387 cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
389 #ifdef CONFIG_ARM_THUMB
390 if (elf_hwcap & HWCAP_THUMB) {
392 * The LSB of the handler determines if we're going to
393 * be using THUMB or ARM mode for this signal handler.
398 * Clear the If-Then Thumb-2 execution state. ARM spec
399 * requires this to be all 000s in ARM mode. Snapdragon
400 * S4/Krait misbehaves on a Thumb=>ARM signal transition
403 * We must do this whenever we are running on a Thumb-2
404 * capable CPU, which includes ARMv6T2. However, we elect
405 * to always do this to simplify the code; this field is
406 * marked UNK/SBZP for older architectures.
408 cpsr &= ~PSR_IT_MASK;
417 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
418 retcode = (unsigned long)ksig->ka.sa.sa_restorer;
420 unsigned int idx = thumb << 1;
422 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
426 * Put the sigreturn code on the stack no matter which return
427 * mechanism we use in order to remain ABI compliant
429 if (__put_user(sigreturn_codes[idx], rc) ||
430 __put_user(sigreturn_codes[idx+1], rc+1))
434 if (cpsr & MODE32_BIT) {
435 struct mm_struct *mm = current->mm;
438 * 32-bit code can use the signal return page
439 * except when the MPU has protected the vectors
442 retcode = mm->context.sigpage + signal_return_offset +
448 * Ensure that the instruction cache sees
449 * the return code written onto the stack.
451 flush_icache_range((unsigned long)rc,
452 (unsigned long)(rc + 2));
454 retcode = ((unsigned long)rc) + thumb;
458 regs->ARM_r0 = ksig->sig;
459 regs->ARM_sp = (unsigned long)frame;
460 regs->ARM_lr = retcode;
461 regs->ARM_pc = handler;
462 regs->ARM_cpsr = cpsr;
468 setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
470 struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
477 * Set uc.uc_flags to a value which sc.trap_no would never have.
479 err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
481 err |= setup_sigframe(frame, regs, set);
483 err = setup_return(regs, ksig, frame->retcode, frame);
489 setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
491 struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
497 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
499 err |= __put_user(0, &frame->sig.uc.uc_flags);
500 err |= __put_user(NULL, &frame->sig.uc.uc_link);
502 err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
503 err |= setup_sigframe(&frame->sig, regs, set);
505 err = setup_return(regs, ksig, frame->sig.retcode, frame);
509 * For realtime signals we must also set the second and third
510 * arguments for the signal handler.
511 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
513 regs->ARM_r1 = (unsigned long)&frame->info;
514 regs->ARM_r2 = (unsigned long)&frame->sig.uc;
521 * OK, we're invoking a handler
523 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
525 sigset_t *oldset = sigmask_to_save();
529 * Set up the stack frame
531 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
532 ret = setup_rt_frame(ksig, oldset, regs);
534 ret = setup_frame(ksig, oldset, regs);
537 * Check that the resulting registers are actually sane.
539 ret |= !valid_user_regs(regs);
541 signal_setup_done(ret, ksig, 0);
545 * Note that 'init' is a special process: it doesn't get signals it doesn't
546 * want to handle. Thus you cannot kill init even with a SIGKILL even by
549 * Note that we go through the signals twice: once to check the signals that
550 * the kernel can handle, and then we build all the user-level signal handling
551 * stack-frames in one go after that.
553 static int do_signal(struct pt_regs *regs, int syscall)
555 unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
560 * If we were from a system call, check for system call restarting...
563 continue_addr = regs->ARM_pc;
564 restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
565 retval = regs->ARM_r0;
568 * Prepare for system call restart. We do this here so that a
569 * debugger will see the already changed PSW.
572 case -ERESTART_RESTARTBLOCK:
574 case -ERESTARTNOHAND:
576 case -ERESTARTNOINTR:
578 regs->ARM_r0 = regs->ARM_ORIG_r0;
579 regs->ARM_pc = restart_addr;
585 * Get the signal to deliver. When running under ptrace, at this
586 * point the debugger may change all our registers ...
589 * Depending on the signal settings we may need to revert the
590 * decision to restart the system call. But skip this if a
591 * debugger has chosen to restart at a different PC.
593 if (get_signal(&ksig)) {
595 if (unlikely(restart) && regs->ARM_pc == restart_addr) {
596 if (retval == -ERESTARTNOHAND ||
597 retval == -ERESTART_RESTARTBLOCK
598 || (retval == -ERESTARTSYS
599 && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
600 regs->ARM_r0 = -EINTR;
601 regs->ARM_pc = continue_addr;
604 handle_signal(&ksig, regs);
607 restore_saved_sigmask();
608 if (unlikely(restart) && regs->ARM_pc == restart_addr) {
609 regs->ARM_pc = continue_addr;
617 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
620 * The assembly code enters us with IRQs off, but it hasn't
621 * informed the tracing code of that for efficiency reasons.
622 * Update the trace code with the current status.
624 trace_hardirqs_off();
626 if (likely(thread_flags & _TIF_NEED_RESCHED)) {
629 if (unlikely(!user_mode(regs)))
632 if (thread_flags & _TIF_SIGPENDING) {
633 int restart = do_signal(regs, syscall);
634 if (unlikely(restart)) {
636 * Restart without handlers.
637 * Deal with it without leaving
643 } else if (thread_flags & _TIF_UPROBE) {
644 uprobe_notify_resume(regs);
646 clear_thread_flag(TIF_NOTIFY_RESUME);
647 tracehook_notify_resume(regs);
651 thread_flags = current_thread_info()->flags;
652 } while (thread_flags & _TIF_WORK_MASK);
656 struct page *get_signal_page(void)
663 page = alloc_pages(GFP_KERNEL, 0);
668 addr = page_address(page);
670 /* Poison the entire page */
671 memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
672 PAGE_SIZE / sizeof(u32));
674 /* Give the signal return code some randomness */
675 offset = 0x200 + (get_random_int() & 0x7fc);
676 signal_return_offset = offset;
678 /* Copy signal return handlers into the page */
679 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
681 /* Flush out all instructions in this page */
682 ptr = (unsigned long)addr;
683 flush_icache_range(ptr, ptr + PAGE_SIZE);
688 /* Defer to generic check */
689 asmlinkage void addr_limit_check_failed(void)
691 addr_limit_user_check();