/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}

void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through a TRACED -> RUNNING -> STOPPED transition, which is
 * similar to, but in the opposite direction of, what happens while
 * attaching to a stopped task. However, in this direction, the
 * intermediate RUNNING state is not hidden even from the current ptracer,
 * and if it immediately re-attaches and performs a WNOHANG wait(2), it
 * may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING. TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group; set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt. Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
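
/*
 * Illustrative userspace sketch (not part of this file) of the WNOHANG
 * race documented above: after detach moves a group-stopped tracee
 * through the transient RUNNING state, an immediate re-attach plus a
 * waitpid(..., WNOHANG) may not yet see a stopped child:
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	if (waitpid(pid, &status, WNOHANG) == 0)
 *		;	(tracee may still be mid-transition)
 */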

static bool looks_like_a_spurious_pid(struct task_struct *task)
{
	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
		return false;

	if (task_pid_vnr(task) == task->ptrace_message)
		return false;
	/*
	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
	 * was not wait()'ed; most probably the debugger targets the old
	 * leader which was destroyed in de_thread().
	 */
	return true;
}
182 static bool ptrace_freeze_traced(struct task_struct *task)
186 /* Lockless, nobody but us can set this flag */
187 if (task->jobctl & JOBCTL_LISTENING)
190 spin_lock_irq(&task->sighand->siglock);
191 if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
192 !__fatal_signal_pending(task)) {
193 task->state = __TASK_TRACED;
196 spin_unlock_irq(&task->sighand->siglock);

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();

	/*
	 * If a task drops privileges and becomes nondumpable (through a
	 * syscall like setresuid()) while we are trying to access it, we
	 * must ensure that the dumpability is read after the credentials;
	 * otherwise, we may be able to attach to a task that we shouldn't
	 * be able to attach to (as if the task had dropped privileges
	 * without becoming nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}
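
/*
 * Kernel-side usage sketch (illustrative, not from this file): a
 * /proc-style reader gates access with the FSCREDS variant, e.g.
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 *
 * while a syscall that acts on another process passes
 * PTRACE_MODE_ATTACH_REALCREDS, matching the distinction drawn in the
 * comment above.
 */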

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

static int check_ptrace_options(unsigned long data)
{
	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	return 0;
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		/*
		 * This duplicates the check in check_ptrace_options() because
		 * ptrace_attach() and ptrace_setoptions() have historically
		 * used different error codes for unknown ptrace options.
		 */
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		retval = check_ptrace_options(flags);
		if (retval)
			return retval;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens. We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides the STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread, but a different thread in the same group can
	 * still observe the transient RUNNING state. IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
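
/*
 * Userspace usage sketch (illustrative, not part of this file; assumes
 * <sys/ptrace.h> and <sys/wait.h>). PTRACE_ATTACH forces a SIGSTOP and
 * should be paired with a wait, while PTRACE_SEIZE attaches without
 * trapping the tracee and takes ptrace options in the data argument:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_O_TRACEEXIT);
 */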

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
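
/*
 * Userspace usage sketch (illustrative, not part of this file): the
 * canonical PTRACE_TRACEME pattern, where the child volunteers to be
 * traced by its parent before exec:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execlp("ls", "ls", NULL);
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);
 */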

/*
 * Called with irqs disabled, returns true if children should reap
 * themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in which case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, and it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(); see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (retval <= 0) {
			if (copied)
				break;
			return retval;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (retval != this_len) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;
	int ret;

	ret = check_ptrace_options(data);
	if (ret)
		return ret;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
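
/*
 * Userspace usage sketch (illustrative, not part of this file):
 * enabling exec and exit notifications on an existing tracee:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACEEXEC | PTRACE_O_TRACEEXIT));
 */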

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
			     sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
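
/*
 * Userspace usage sketch (illustrative, not part of this file):
 * peeking at the first few pending signals of a stopped tracee:
 *
 *	struct ptrace_peeksiginfo_args arg = { .off = 0, .flags = 0, .nr = 4 };
 *	siginfo_t infos[4];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &arg, infos);
 */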

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from the tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet; the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
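
/*
 * Userspace usage sketch (illustrative, not part of this file):
 * resuming a stopped tracee and forwarding the signal that stopped it,
 * which lands in the 'data' argument handled above:
 *
 *	waitpid(pid, &status, 0);
 *	int sig = WIFSTOPPED(status) ? WSTOPSIG(status) : 0;
 *	ptrace(PTRACE_CONT, pid, NULL, (void *)(long)sig);
 */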

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif
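
/*
 * Userspace usage sketch (illustrative, not part of this file):
 * reading the general-purpose registers via the regset interface on
 * x86-64, for example, assuming <sys/uio.h> and <elf.h> for NT_PRSTATUS:
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */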

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception. If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * the tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means an event happened between
			 * the start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}
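
/*
 * Userspace usage sketch (illustrative, not part of this file): the
 * PTRACE_SEIZE / PTRACE_INTERRUPT / PTRACE_LISTEN trio described above,
 * stopping a seized tracee and then, once the PTRACE_EVENT_STOP trap is
 * reported, letting it sleep while still reporting async events:
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, NULL);
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	ptrace(PTRACE_LISTEN, pid, NULL, NULL);
 */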

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
				  FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
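
/*
 * Userspace usage sketch (illustrative, not part of this file):
 * reading one word of tracee memory, which reaches ptrace_access_vm()
 * through generic_ptrace_peekdata():
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno)
 *		perror("PTRACE_PEEKDATA");
 */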

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				       FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				       FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */