// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */
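/*
 * Illustrative userspace sketch (not part of this file, and not kernel
 * code): installing a minimal mode-2 filter through prctl(). The
 * program below allows every syscall, which is the smallest filter
 * seccomp_check_filter() will accept. A task without CAP_SYS_ADMIN
 * must set no_new_privs first (see seccomp_prepare_filter()):
 *
 *	#include <linux/filter.h>
 *	#include <linux/seccomp.h>
 *	#include <sys/prctl.h>
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */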
#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

/* Not exposed in headers: strictly internal use only. */
#define SECCOMP_MODE_DEAD	(SECCOMP_MODE_FILTER + 1)
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *	   get/put helpers should be used when accessing an instance
 *	   outside of a lifetime-guarded section. In general, this
 *	   is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};
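/*
 * Illustration of the tree described above: if task A installs filter
 * F1, forks task B, and each task then installs one more filter, memory
 * holds a tree rooted at F1 even though each task only sees a list:
 *
 *	F1 <--- F2a <--- A->seccomp.filter
 *	 ^
 *	 `----- F2b <--- B->seccomp.filter
 */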
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
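/*
 * For reference, the layout populated above is the UAPI
 * struct seccomp_data from <uapi/linux/seccomp.h>; filters read it with
 * 32-bit BPF_LD | BPF_W | BPF_ABS loads at these offsets:
 *
 *	struct seccomp_data {
 *		int nr;				// offset 0
 *		__u32 arch;			// offset 4
 *		__u64 instruction_pointer;	// offset 8
 *		__u64 args[6];			// offset 16
 *	};
 */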
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
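/*
 * For instance, a filter fetching the syscall number uses a load that
 * the loop above both bounds-checks and rewrites to
 * BPF_LDX | BPF_W | BPF_ABS:
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *		 offsetof(struct seccomp_data, nr)),
 *
 * A load at, say, offset 2 would fail the "k & 3" alignment check and
 * the whole program would be rejected with -EINVAL.
 */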
/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL_PROCESS;

	if (unlikely(!sd)) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
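/*
 * Because ACTION_ONLY() compares the action bits as a signed 32-bit
 * value, the precedence above (most to least restrictive) works out to:
 *
 *	SECCOMP_RET_KILL_PROCESS < SECCOMP_RET_KILL_THREAD <
 *	SECCOMP_RET_TRAP < SECCOMP_RET_ERRNO < SECCOMP_RET_TRACE <
 *	SECCOMP_RET_LOG < SECCOMP_RET_ALLOW
 *
 * e.g. if one filter in the chain returns SECCOMP_RET_ERRNO and another
 * returns SECCOMP_RET_TRACE, the syscall fails with the errno.
 */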
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode,
				       unsigned long flags)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	/* Assume default seccomp processes want spec flaw mitigation. */
	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
		arch_seccomp_spec_mitigate(task);
	set_tsk_thread_flag(task, TIF_SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}
/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference. (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
					    flags);
	}
}
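/*
 * Userspace reaches this path via the seccomp(2) TSYNC flag; an
 * illustrative (non-kernel) sketch, assuming the libc syscall()
 * wrapper:
 *
 *	long tid = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *			   SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *	if (tid > 0)
 *		;	// tid names a thread that could not be synchronized
 */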
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads(flags);

	return 0;
}
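/*
 * Worked example of the limit above: MAX_INSNS_PER_PATH is
 * (1 << 18) / sizeof(struct sock_filter) = 262144 / 8 = 32768
 * instructions. A task that repeatedly attaches maximum-size
 * BPF_MAXINSNS (4096) filters therefore gets -ENOMEM on the eighth
 * attach, slightly sooner than 32768 / 4096 alone would suggest
 * because of the 4-instruction per-filter penalty.
 */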
static void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	__get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
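/*
 * Illustrative userspace counterpart (not kernel code): a SIGSYS
 * handler can recover the fields set by seccomp_init_siginfo() above
 * from the siginfo_t it receives, e.g.:
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		if (info->si_code == SYS_SECCOMP) {
 *			// info->si_syscall:   nr of the trapped syscall
 *			// info->si_errno:     16 bits of SECCOMP_RET_DATA
 *			// info->si_call_addr: trapping instruction pointer
 *		}
 *	}
 *
 * installed with sigaction() and SA_SIGINFO set in sa_flags.
 */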
#endif	/* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
#define SECCOMP_LOG_KILL_THREAD		(1 << 1)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
				    SECCOMP_LOG_KILL_THREAD  |
				    SECCOMP_LOG_TRAP  |
				    SECCOMP_LOG_ERRNO |
				    SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;
static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL_THREAD:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
		break;
	case SECCOMP_RET_KILL_PROCESS:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL_*,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};
static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	current->seccomp.mode = SECCOMP_MODE_DEAD;
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
	do_exit(SIGKILL);
}
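/*
 * Strict mode is entered from userspace with (sketch, not kernel code):
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 * after which any syscall outside the table above kills the calling
 * thread with SIGKILL.
 */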
#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else
#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION_FULL;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;
	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;
	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_KILL_PROCESS:
	default:
		current->seccomp.mode = SECCOMP_MODE_DEAD;
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (action == SECCOMP_RET_KILL_PROCESS ||
		    get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		if (action == SECCOMP_RET_KILL_PROCESS)
			do_group_exit(SIGSYS);
		else
			do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return 1;
}
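/*
 * As an illustration of the SECCOMP_RET_ERRNO path above, a userspace
 * filter (sketch, not kernel code) that fails every chmod(2) with EPERM
 * while allowing everything else:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_chmod, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *
 * The EPERM lands in the low SECCOMP_RET_DATA bits and is applied via
 * syscall_set_return_value() without the syscall ever running.
 */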
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif
int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	/* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
	case SECCOMP_MODE_DEAD:
		WARN_ON_ONCE(1);
		do_exit(SIGKILL);
		return -1;
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode, 0);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode, flags);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif
static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL_PROCESS:
	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
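/*
 * Userspace probe for action support (sketch, not kernel code),
 * assuming the libc syscall() wrapper:
 *
 *	__u32 action = SECCOMP_RET_LOG;
 *	if (syscall(__NR_seccomp, SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0)
 *		;	// this kernel knows SECCOMP_RET_LOG
 */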
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	__get_seccomp_filter(filter);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
#endif
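/*
 * The consumer of this interface is ptrace (illustrative sketch, not
 * kernel code):
 *
 *	long cnt = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL);
 *	struct sock_filter *insns = calloc(cnt, sizeof(*insns));
 *	ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns);
 *
 * With data == NULL the call returns the instruction count of the
 * selected filter; the addr argument picks one filter by index, which
 * the walk above resolves within the tracee's filter chain.
 */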
#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
				SECCOMP_RET_KILL_PROCESS_NAME	" "
				SECCOMP_RET_KILL_THREAD_NAME	" "
				SECCOMP_RET_TRAP_NAME		" "
				SECCOMP_RET_ERRNO_NAME		" "
				SECCOMP_RET_TRACE_NAME		" "
				SECCOMP_RET_LOG_NAME		" "
				SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
	{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};
static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}
static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}
static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}
static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};

static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};
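/*
 * Resulting administrator interface (illustrative usage): reading
 * /proc/sys/kernel/seccomp/actions_avail yields the space-separated
 * name list built above ("kill_process kill_thread trap errno trace
 * log allow"), and writing a subset of those names to
 * /proc/sys/kernel/seccomp/actions_logged selects which actions are
 * logged. "allow" is deliberately rejected for actions_logged by the
 * handler above.
 */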
static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */