1 // SPDX-License-Identifier: GPL-2.0
3 * linux/kernel/seccomp.c
5 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
7 * Copyright (C) 2012 Google, Inc.
8 * Will Drewry <wad@chromium.org>
10 * This defines a simple but solid secure-computing facility.
12 * Mode 1 uses a fixed list of allowed system calls.
13 * Mode 2 allows user-defined system call filters in the form
14 * of Berkeley Packet Filters/Linux Socket Filters.
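 *
 * Illustrative userspace sketch (not kernel code) of entering mode 1 via
 * the documented prctl(2) interface:
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 * after which only read(2), write(2), _exit(2) and sigreturn(2) are
 * permitted; any other syscall kills the calling thread with SIGKILL.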
17 #include <linux/refcount.h>
18 #include <linux/audit.h>
19 #include <linux/compat.h>
20 #include <linux/coredump.h>
21 #include <linux/kmemleak.h>
22 #include <linux/nospec.h>
23 #include <linux/prctl.h>
24 #include <linux/sched.h>
25 #include <linux/sched/task_stack.h>
26 #include <linux/seccomp.h>
27 #include <linux/slab.h>
28 #include <linux/syscalls.h>
29 #include <linux/sysctl.h>
31 /* Not exposed in headers: strictly internal use only. */
32 #define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
34 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
35 #include <asm/syscall.h>
38 #ifdef CONFIG_SECCOMP_FILTER
39 #include <linux/filter.h>
40 #include <linux/pid.h>
41 #include <linux/ptrace.h>
42 #include <linux/capability.h>
43 #include <linux/tracehook.h>
44 #include <linux/uaccess.h>
47 * struct seccomp_filter - container for seccomp BPF programs
49 * @usage: reference count to manage the object lifetime.
50 * get/put helpers should be used when accessing an instance
51 * outside of a lifetime-guarded section. In general, this
52 * is only needed for handling filters shared across tasks.
53 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
54 * @prev: points to a previously installed, or inherited, filter
55 * @prog: the BPF program to evaluate
57 * seccomp_filter objects are organized in a tree linked via the @prev
58 * pointer. For any task, it appears to be a singly-linked list starting
59 * with current->seccomp.filter, the most recently attached or inherited filter.
60 * However, multiple filters may share a @prev node, by way of fork(), which
61 * results in a unidirectional tree existing in memory. This is similar to
62 * how namespaces work.
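 *
 * For example, if a task with filter f1 forks, both parent and child start
 * out pointing at f1; if the child then attaches f2, the child's list is
 * f2 -> f1 while the parent's remains f1, and f1 is shared between them
 * via @prev.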
64 * seccomp_filter objects should never be modified after being attached
65 * to a task_struct (other than @usage).
67 struct seccomp_filter {
70 struct seccomp_filter *prev;
71 struct bpf_prog *prog;
74 /* Limit any path through the tree to 256KB worth of instructions. */
75 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
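/*
 * With the 8-byte classic struct sock_filter this is (1 << 18) / 8, i.e.
 * 32768 instructions along any one chain of filters.
 */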
78 * Endianness is explicitly ignored and left for BPF program authors to manage
79 * as per the specific architecture.
81 static void populate_seccomp_data(struct seccomp_data *sd)
83 struct task_struct *task = current;
84 struct pt_regs *regs = task_pt_regs(task);
85 unsigned long args[6];
87 sd->nr = syscall_get_nr(task, regs);
88 sd->arch = syscall_get_arch();
89 syscall_get_arguments(task, regs, 0, 6, args);
90 sd->args[0] = args[0];
91 sd->args[1] = args[1];
92 sd->args[2] = args[2];
93 sd->args[3] = args[3];
94 sd->args[4] = args[4];
95 sd->args[5] = args[5];
96 sd->instruction_pointer = KSTK_EIP(task);
100 * seccomp_check_filter - verify seccomp filter code
101 * @filter: filter to verify
102 * @flen: length of filter
104 * Takes a previously checked filter (by bpf_check_classic) and
105 * redirects all filter code that loads struct sk_buff data
106 * and related data through seccomp_bpf_load. It also
107 * enforces length and alignment checking of those loads.
109 * Returns 0 if the rule set is legal or -EINVAL if not.
111 static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
114 for (pc = 0; pc < flen; pc++) {
115 struct sock_filter *ftest = &filter[pc];
116 u16 code = ftest->code;
120 case BPF_LD | BPF_W | BPF_ABS:
121 ftest->code = BPF_LDX | BPF_W | BPF_ABS;
122 /* 32-bit aligned and not out of bounds. */
123 if (k >= sizeof(struct seccomp_data) || k & 3)
126 case BPF_LD | BPF_W | BPF_LEN:
127 ftest->code = BPF_LD | BPF_IMM;
128 ftest->k = sizeof(struct seccomp_data);
130 case BPF_LDX | BPF_W | BPF_LEN:
131 ftest->code = BPF_LDX | BPF_IMM;
132 ftest->k = sizeof(struct seccomp_data);
134 /* Explicitly include allowed calls. */
135 case BPF_RET | BPF_K:
136 case BPF_RET | BPF_A:
137 case BPF_ALU | BPF_ADD | BPF_K:
138 case BPF_ALU | BPF_ADD | BPF_X:
139 case BPF_ALU | BPF_SUB | BPF_K:
140 case BPF_ALU | BPF_SUB | BPF_X:
141 case BPF_ALU | BPF_MUL | BPF_K:
142 case BPF_ALU | BPF_MUL | BPF_X:
143 case BPF_ALU | BPF_DIV | BPF_K:
144 case BPF_ALU | BPF_DIV | BPF_X:
145 case BPF_ALU | BPF_AND | BPF_K:
146 case BPF_ALU | BPF_AND | BPF_X:
147 case BPF_ALU | BPF_OR | BPF_K:
148 case BPF_ALU | BPF_OR | BPF_X:
149 case BPF_ALU | BPF_XOR | BPF_K:
150 case BPF_ALU | BPF_XOR | BPF_X:
151 case BPF_ALU | BPF_LSH | BPF_K:
152 case BPF_ALU | BPF_LSH | BPF_X:
153 case BPF_ALU | BPF_RSH | BPF_K:
154 case BPF_ALU | BPF_RSH | BPF_X:
155 case BPF_ALU | BPF_NEG:
156 case BPF_LD | BPF_IMM:
157 case BPF_LDX | BPF_IMM:
158 case BPF_MISC | BPF_TAX:
159 case BPF_MISC | BPF_TXA:
160 case BPF_LD | BPF_MEM:
161 case BPF_LDX | BPF_MEM:
164 case BPF_JMP | BPF_JA:
165 case BPF_JMP | BPF_JEQ | BPF_K:
166 case BPF_JMP | BPF_JEQ | BPF_X:
167 case BPF_JMP | BPF_JGE | BPF_K:
168 case BPF_JMP | BPF_JGE | BPF_X:
169 case BPF_JMP | BPF_JGT | BPF_K:
170 case BPF_JMP | BPF_JGT | BPF_X:
171 case BPF_JMP | BPF_JSET | BPF_K:
172 case BPF_JMP | BPF_JSET | BPF_X:
182 * seccomp_run_filters - evaluates all seccomp filters against @sd
183 * @sd: optional seccomp data to be passed to filters
184 * @match: stores struct seccomp_filter that resulted in the return value,
 * unless filter returned SECCOMP_RET_ALLOW, in which case it will
 * be unchanged.
188 * Returns valid seccomp BPF response codes.
190 #define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
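/*
 * The s32 cast above makes SECCOMP_RET_KILL_PROCESS (0x80000000U) compare as
 * the smallest action, so precedence from most to least restrictive is
 * KILL_PROCESS, KILL_THREAD, TRAP, ERRNO, TRACE, LOG, ALLOW.
 */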
191 static u32 seccomp_run_filters(const struct seccomp_data *sd,
192 struct seccomp_filter **match)
194 struct seccomp_data sd_local;
195 u32 ret = SECCOMP_RET_ALLOW;
196 /* Make sure cross-thread synced filter points somewhere sane. */
197 struct seccomp_filter *f =
198 READ_ONCE(current->seccomp.filter);
200 /* Ensure unexpected behavior doesn't result in failing open. */
201 if (unlikely(WARN_ON(f == NULL)))
202 return SECCOMP_RET_KILL_PROCESS;
if (unlikely(!sd)) {
	populate_seccomp_data(&sd_local);
	sd = &sd_local;
}
210 * All filters in the list are evaluated and the lowest BPF return
211 * value always takes priority (ignoring the DATA).
213 for (; f; f = f->prev) {
214 u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
216 if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
223 #endif /* CONFIG_SECCOMP_FILTER */
225 static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
assert_spin_locked(&current->sighand->siglock);
229 if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
235 void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
237 static inline void seccomp_assign_mode(struct task_struct *task,
238 unsigned long seccomp_mode,
241 assert_spin_locked(&task->sighand->siglock);
243 task->seccomp.mode = seccomp_mode;
 * Make sure TIF_SECCOMP cannot be set before the mode (and
 * filter) is set.
248 smp_mb__before_atomic();
249 /* Assume default seccomp processes want spec flaw mitigation. */
250 if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
251 arch_seccomp_spec_mitigate(task);
252 set_tsk_thread_flag(task, TIF_SECCOMP);
255 #ifdef CONFIG_SECCOMP_FILTER
256 /* Returns 1 if the parent is an ancestor of the child. */
257 static int is_ancestor(struct seccomp_filter *parent,
258 struct seccomp_filter *child)
260 /* NULL is the root ancestor. */
263 for (; child; child = child->prev)
270 * seccomp_can_sync_threads: checks if all threads can be synchronized
272 * Expects sighand and cred_guard_mutex locks to be held.
274 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
278 static inline pid_t seccomp_can_sync_threads(void)
280 struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
285 /* Validate all threads being eligible for synchronization. */
287 for_each_thread(caller, thread) {
290 /* Skip current, since it is initiating the sync. */
291 if (thread == caller)
294 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
295 (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
296 is_ancestor(thread->seccomp.filter,
297 caller->seccomp.filter)))
300 /* Return the first thread that cannot be synchronized. */
301 failed = task_pid_vnr(thread);
302 /* If the pid cannot be resolved, then return -ESRCH */
303 if (unlikely(WARN_ON(failed == 0)))
312 * seccomp_sync_threads: sets all threads to use current's filter
314 * Expects sighand and cred_guard_mutex locks to be held, and for
315 * seccomp_can_sync_threads() to have returned success already
316 * without dropping the locks.
319 static inline void seccomp_sync_threads(unsigned long flags)
321 struct task_struct *thread, *caller;
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
326 /* Synchronize all threads. */
328 for_each_thread(caller, thread) {
329 /* Skip current, since it needs no changes. */
330 if (thread == caller)
333 /* Get a task reference for the new leaf node. */
334 get_seccomp_filter(caller);
336 * Drop the task reference to the shared ancestor since
337 * current's path will hold a reference. (This also
338 * allows a put before the assignment.)
340 put_seccomp_filter(thread);
341 smp_store_release(&thread->seccomp.filter,
342 caller->seccomp.filter);
345 * Don't let an unprivileged task work around
346 * the no_new_privs restriction by creating
 * a thread that sets it up, enters seccomp, then dies.
350 if (task_no_new_privs(caller))
351 task_set_no_new_privs(thread);
354 * Opt the other thread into seccomp if needed.
355 * As threads are considered to be trust-realm
356 * equivalent (see ptrace_may_access), it is safe to
357 * allow one thread to transition the other.
359 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
360 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
366 * seccomp_prepare_filter: Prepares a seccomp filter for use.
367 * @fprog: BPF program to install
369 * Returns filter on success or an ERR_PTR on failure.
371 static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
373 struct seccomp_filter *sfilter;
375 const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
377 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
378 return ERR_PTR(-EINVAL);
380 BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
383 * Installing a seccomp filter requires that the task has
384 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
385 * This avoids scenarios where unprivileged tasks can affect the
386 * behavior of privileged children.
388 if (!task_no_new_privs(current) &&
389 !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
390 return ERR_PTR(-EACCES);
392 /* Allocate a new seccomp_filter */
393 sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
395 return ERR_PTR(-ENOMEM);
397 ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
398 seccomp_check_filter, save_orig);
404 refcount_set(&sfilter->usage, 1);
410 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
411 * @user_filter: pointer to the user data containing a sock_fprog.
 * Returns the prepared filter on success or an ERR_PTR on failure.
415 static struct seccomp_filter *
416 seccomp_prepare_user_filter(const char __user *user_filter)
418 struct sock_fprog fprog;
419 struct seccomp_filter *filter = ERR_PTR(-EFAULT);
422 if (in_compat_syscall()) {
423 struct compat_sock_fprog fprog32;
424 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
426 fprog.len = fprog32.len;
427 fprog.filter = compat_ptr(fprog32.filter);
428 } else /* falls through to the if below. */
430 if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
432 filter = seccomp_prepare_filter(&fprog);
438 * seccomp_attach_filter: validate and attach filter
439 * @flags: flags to change filter behavior
440 * @filter: seccomp filter to add to the current process
442 * Caller must be holding current->sighand->siglock lock.
444 * Returns 0 on success, -ve on error.
446 static long seccomp_attach_filter(unsigned int flags,
447 struct seccomp_filter *filter)
449 unsigned long total_insns;
450 struct seccomp_filter *walker;
assert_spin_locked(&current->sighand->siglock);
454 /* Validate resulting filter length. */
455 total_insns = filter->prog->len;
456 for (walker = current->seccomp.filter; walker; walker = walker->prev)
457 total_insns += walker->prog->len + 4; /* 4 instr penalty */
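/*
 * The fixed per-filter penalty is meant to approximate the cost of
 * dispatching each additional program, so stacking many tiny filters
 * does not sidestep the instruction budget.
 */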
458 if (total_insns > MAX_INSNS_PER_PATH)
461 /* If thread sync has been requested, check that it is possible. */
462 if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
465 ret = seccomp_can_sync_threads();
470 /* Set log flag, if present. */
471 if (flags & SECCOMP_FILTER_FLAG_LOG)
 * If there is an existing filter, make it the prev and don't drop its
 * task reference.
478 filter->prev = current->seccomp.filter;
479 current->seccomp.filter = filter;
481 /* Now that the new filter is in place, synchronize to all threads. */
482 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
483 seccomp_sync_threads(flags);
488 static void __get_seccomp_filter(struct seccomp_filter *filter)
490 /* Reference count is bounded by the number of total processes. */
491 refcount_inc(&filter->usage);
494 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
495 void get_seccomp_filter(struct task_struct *tsk)
497 struct seccomp_filter *orig = tsk->seccomp.filter;
500 __get_seccomp_filter(orig);
503 static inline void seccomp_filter_free(struct seccomp_filter *filter)
506 bpf_prog_destroy(filter->prog);
511 static void __put_seccomp_filter(struct seccomp_filter *orig)
513 /* Clean up single-reference branches iteratively. */
514 while (orig && refcount_dec_and_test(&orig->usage)) {
515 struct seccomp_filter *freeme = orig;
517 seccomp_filter_free(freeme);
521 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
522 void put_seccomp_filter(struct task_struct *tsk)
524 __put_seccomp_filter(tsk->seccomp.filter);
527 static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
530 info->si_signo = SIGSYS;
531 info->si_code = SYS_SECCOMP;
532 info->si_call_addr = (void __user *)KSTK_EIP(current);
533 info->si_errno = reason;
534 info->si_arch = syscall_get_arch();
535 info->si_syscall = syscall;
539 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
540 * @syscall: syscall number to send to userland
541 * @reason: filter-supplied reason code to send to userland (via si_errno)
543 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
545 static void seccomp_send_sigsys(int syscall, int reason)
548 seccomp_init_siginfo(&info, syscall, reason);
549 force_sig_info(SIGSYS, &info, current);
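/*
 * Userspace counterpart (an illustrative sketch, not kernel code): a SIGSYS
 * handler installed with SA_SIGINFO sees the fields filled in above as
 * si->si_syscall (the trapped syscall number), si->si_errno (the filter's
 * SECCOMP_RET_DATA), si->si_arch (the AUDIT_ARCH_* value) and
 * si->si_call_addr (the faulting instruction pointer).
 */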
551 #endif /* CONFIG_SECCOMP_FILTER */
553 /* For use with seccomp_actions_logged */
554 #define SECCOMP_LOG_KILL_PROCESS (1 << 0)
555 #define SECCOMP_LOG_KILL_THREAD (1 << 1)
556 #define SECCOMP_LOG_TRAP (1 << 2)
557 #define SECCOMP_LOG_ERRNO (1 << 3)
558 #define SECCOMP_LOG_TRACE (1 << 4)
559 #define SECCOMP_LOG_LOG (1 << 5)
560 #define SECCOMP_LOG_ALLOW (1 << 6)
562 static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
563 SECCOMP_LOG_KILL_THREAD |
569 static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
575 case SECCOMP_RET_ALLOW:
577 case SECCOMP_RET_TRAP:
578 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
580 case SECCOMP_RET_ERRNO:
581 log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
583 case SECCOMP_RET_TRACE:
584 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
586 case SECCOMP_RET_LOG:
587 log = seccomp_actions_logged & SECCOMP_LOG_LOG;
589 case SECCOMP_RET_KILL_THREAD:
590 log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
592 case SECCOMP_RET_KILL_PROCESS:
594 log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
598 * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
599 * FILTER_FLAG_LOG bit was set. The admin has the ability to silence
600 * any action from being logged by removing the action name from the
601 * seccomp_actions_logged sysctl.
606 audit_seccomp(syscall, signr, action);
610 * Secure computing mode 1 allows only read/write/exit/sigreturn.
611 * To be fully secure this must be combined with rlimit
612 * to limit the stack allocations too.
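 * On x86-64, for example, this whitelist corresponds to syscall numbers
 * 0 (read), 1 (write), 60 (exit) and 15 (rt_sigreturn).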
614 static const int mode1_syscalls[] = {
615 __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
616 0, /* null terminated */
619 static void __secure_computing_strict(int this_syscall)
621 const int *syscall_whitelist = mode1_syscalls;
623 if (in_compat_syscall())
624 syscall_whitelist = get_compat_mode1_syscalls();
627 if (*syscall_whitelist == this_syscall)
629 } while (*++syscall_whitelist);
634 current->seccomp.mode = SECCOMP_MODE_DEAD;
635 seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
639 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
640 void secure_computing_strict(int this_syscall)
642 int mode = current->seccomp.mode;
644 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
645 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
648 if (mode == SECCOMP_MODE_DISABLED)
650 else if (mode == SECCOMP_MODE_STRICT)
651 __secure_computing_strict(this_syscall);
657 #ifdef CONFIG_SECCOMP_FILTER
658 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
659 const bool recheck_after_trace)
661 u32 filter_ret, action;
662 struct seccomp_filter *match = NULL;
666 * Make sure that any changes to mode from another thread have
667 * been seen after TIF_SECCOMP was seen.
671 filter_ret = seccomp_run_filters(sd, &match);
672 data = filter_ret & SECCOMP_RET_DATA;
673 action = filter_ret & SECCOMP_RET_ACTION_FULL;
676 case SECCOMP_RET_ERRNO:
677 /* Set low-order bits as an errno, capped at MAX_ERRNO. */
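/*
 * E.g. a filter returning SECCOMP_RET_ERRNO | EPERM makes the trapped
 * syscall return -EPERM to the caller without ever running.
 */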
678 if (data > MAX_ERRNO)
680 syscall_set_return_value(current, task_pt_regs(current),
684 case SECCOMP_RET_TRAP:
685 /* Show the handler the original registers. */
686 syscall_rollback(current, task_pt_regs(current));
687 /* Let the filter pass back 16 bits of data. */
688 seccomp_send_sigsys(this_syscall, data);
691 case SECCOMP_RET_TRACE:
692 /* We've been put in this state by the ptracer already. */
693 if (recheck_after_trace)
696 /* ENOSYS these calls if there is no tracer attached. */
697 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
698 syscall_set_return_value(current,
699 task_pt_regs(current),
704 /* Allow the BPF to provide the event message */
705 ptrace_event(PTRACE_EVENT_SECCOMP, data);
707 * The delivery of a fatal signal during event
708 * notification may silently skip tracer notification,
709 * which could leave us with a potentially unmodified
710 * syscall that the tracer would have liked to have
711 * changed. Since the process is about to die, we just
712 * force the syscall to be skipped and let the signal
 * kill the process and correctly handle any tracer exit
 * notifications.
716 if (fatal_signal_pending(current))
718 /* Check if the tracer forced the syscall to be skipped. */
719 this_syscall = syscall_get_nr(current, task_pt_regs(current));
720 if (this_syscall < 0)
724 * Recheck the syscall, since it may have changed. This
725 * intentionally uses a NULL struct seccomp_data to force
726 * a reload of all registers. This does not goto skip since
727 * a skip would have already been reported.
729 if (__seccomp_filter(this_syscall, NULL, true))
734 case SECCOMP_RET_LOG:
735 seccomp_log(this_syscall, 0, action, true);
738 case SECCOMP_RET_ALLOW:
740 * Note that the "match" filter will always be NULL for
741 * this action since SECCOMP_RET_ALLOW is the starting
742 * state in seccomp_run_filters().
746 case SECCOMP_RET_KILL_THREAD:
747 case SECCOMP_RET_KILL_PROCESS:
749 current->seccomp.mode = SECCOMP_MODE_DEAD;
750 seccomp_log(this_syscall, SIGSYS, action, true);
751 /* Dump core only if this is the last remaining thread. */
752 if (action == SECCOMP_RET_KILL_PROCESS ||
753 get_nr_threads(current) == 1) {
756 /* Show the original registers in the dump. */
757 syscall_rollback(current, task_pt_regs(current));
758 /* Trigger a manual coredump since do_exit skips it. */
759 seccomp_init_siginfo(&info, this_syscall, data);
762 if (action == SECCOMP_RET_KILL_PROCESS)
763 do_group_exit(SIGSYS);
771 seccomp_log(this_syscall, 0, action, match ? match->log : false);
775 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
776 const bool recheck_after_trace)
784 int __secure_computing(const struct seccomp_data *sd)
786 int mode = current->seccomp.mode;
789 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
790 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
793 this_syscall = sd ? sd->nr :
794 syscall_get_nr(current, task_pt_regs(current));
797 case SECCOMP_MODE_STRICT:
798 __secure_computing_strict(this_syscall); /* may call do_exit */
800 case SECCOMP_MODE_FILTER:
801 return __seccomp_filter(this_syscall, sd, false);
802 /* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
803 case SECCOMP_MODE_DEAD:
811 #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
813 long prctl_get_seccomp(void)
815 return current->seccomp.mode;
819 * seccomp_set_mode_strict: internal function for setting strict seccomp
821 * Once current->seccomp.mode is non-zero, it may not be changed.
823 * Returns 0 on success or -EINVAL on failure.
825 static long seccomp_set_mode_strict(void)
827 const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
spin_lock_irq(&current->sighand->siglock);
832 if (!seccomp_may_assign_mode(seccomp_mode))
838 seccomp_assign_mode(current, seccomp_mode, 0);
spin_unlock_irq(&current->sighand->siglock);
847 #ifdef CONFIG_SECCOMP_FILTER
849 * seccomp_set_mode_filter: internal function for setting seccomp filter
850 * @flags: flags to change filter behavior
851 * @filter: struct sock_fprog containing filter
853 * This function may be called repeatedly to install additional filters.
854 * Every filter successfully installed will be evaluated (in reverse order)
855 * for each system call the task makes.
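 *
 * An illustrative userspace sketch (not kernel code) of such a filter,
 * allowing every syscall except a hypothetical syscall number NR, which
 * fails with EPERM (a robust filter would also check @arch first):
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, NR, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = { .len = 4, .filter = insns };
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);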
857 * Once current->seccomp.mode is non-zero, it may not be changed.
859 * Returns 0 on success or -EINVAL on failure.
861 static long seccomp_set_mode_filter(unsigned int flags,
862 const char __user *filter)
864 const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
865 struct seccomp_filter *prepared = NULL;
868 /* Validate flags. */
869 if (flags & ~SECCOMP_FILTER_FLAG_MASK)
872 /* Prepare the new filter before holding any locks. */
873 prepared = seccomp_prepare_user_filter(filter);
874 if (IS_ERR(prepared))
875 return PTR_ERR(prepared);
878 * Make sure we cannot change seccomp or nnp state via TSYNC
879 * while another thread is in the middle of calling exec.
881 if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
mutex_lock_killable(&current->signal->cred_guard_mutex))
spin_lock_irq(&current->sighand->siglock);
887 if (!seccomp_may_assign_mode(seccomp_mode))
890 ret = seccomp_attach_filter(flags, prepared);
893 /* Do not free the successfully attached filter. */
896 seccomp_assign_mode(current, seccomp_mode, flags);
spin_unlock_irq(&current->sighand->siglock);
899 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
mutex_unlock(&current->signal->cred_guard_mutex);
902 seccomp_filter_free(prepared);
906 static inline long seccomp_set_mode_filter(unsigned int flags,
907 const char __user *filter)
913 static long seccomp_get_action_avail(const char __user *uaction)
917 if (copy_from_user(&action, uaction, sizeof(action)))
921 case SECCOMP_RET_KILL_PROCESS:
922 case SECCOMP_RET_KILL_THREAD:
923 case SECCOMP_RET_TRAP:
924 case SECCOMP_RET_ERRNO:
925 case SECCOMP_RET_TRACE:
926 case SECCOMP_RET_LOG:
927 case SECCOMP_RET_ALLOW:
936 /* Common entry point for both prctl and syscall. */
937 static long do_seccomp(unsigned int op, unsigned int flags,
938 const char __user *uargs)
941 case SECCOMP_SET_MODE_STRICT:
942 if (flags != 0 || uargs != NULL)
944 return seccomp_set_mode_strict();
945 case SECCOMP_SET_MODE_FILTER:
946 return seccomp_set_mode_filter(flags, uargs);
947 case SECCOMP_GET_ACTION_AVAIL:
951 return seccomp_get_action_avail(uargs);
957 SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
958 const char __user *, uargs)
960 return do_seccomp(op, flags, uargs);
964 * prctl_set_seccomp: configures current->seccomp.mode
965 * @seccomp_mode: requested mode to use
966 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
968 * Returns 0 on success or -EINVAL on failure.
970 long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
975 switch (seccomp_mode) {
976 case SECCOMP_MODE_STRICT:
977 op = SECCOMP_SET_MODE_STRICT;
 * Setting strict mode through prctl always ignores the filter argument,
980 * so make sure it is always NULL here to pass the internal
981 * check in do_seccomp().
985 case SECCOMP_MODE_FILTER:
986 op = SECCOMP_SET_MODE_FILTER;
993 /* prctl interface doesn't have flags, so they are always zero. */
994 return do_seccomp(op, 0, uargs);
997 #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
998 static struct seccomp_filter *get_nth_filter(struct task_struct *task,
999 unsigned long filter_off)
1001 struct seccomp_filter *orig, *filter;
1002 unsigned long count;
1005 * Note: this is only correct because the caller should be the (ptrace)
1006 * tracer of the task, otherwise lock_task_sighand is needed.
1008 spin_lock_irq(&task->sighand->siglock);
1010 if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
1011 spin_unlock_irq(&task->sighand->siglock);
1012 return ERR_PTR(-EINVAL);
1015 orig = task->seccomp.filter;
1016 __get_seccomp_filter(orig);
1017 spin_unlock_irq(&task->sighand->siglock);
1020 for (filter = orig; filter; filter = filter->prev)
1023 if (filter_off >= count) {
1024 filter = ERR_PTR(-ENOENT);
1028 count -= filter_off;
1029 for (filter = orig; filter && count > 1; filter = filter->prev)
1032 if (WARN_ON(count != 1 || !filter)) {
1033 filter = ERR_PTR(-ENOENT);
1037 __get_seccomp_filter(filter);
1040 __put_seccomp_filter(orig);
1044 long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
1047 struct seccomp_filter *filter;
1048 struct sock_fprog_kern *fprog;
1051 if (!capable(CAP_SYS_ADMIN) ||
1052 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1056 filter = get_nth_filter(task, filter_off);
1058 return PTR_ERR(filter);
1060 fprog = filter->prog->orig_prog;
1062 /* This must be a new non-cBPF filter, since we save
1063 * every cBPF filter's orig_prog above when
1064 * CONFIG_CHECKPOINT_RESTORE is enabled.
1074 if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1078 __put_seccomp_filter(filter);
1082 long seccomp_get_metadata(struct task_struct *task,
1083 unsigned long size, void __user *data)
1086 struct seccomp_filter *filter;
1087 struct seccomp_metadata kmd = {};
1089 if (!capable(CAP_SYS_ADMIN) ||
1090 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1094 size = min_t(unsigned long, size, sizeof(kmd));
1096 if (size < sizeof(kmd.filter_off))
1099 if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
1102 filter = get_nth_filter(task, kmd.filter_off);
1104 return PTR_ERR(filter);
1107 kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
1110 if (copy_to_user(data, &kmd, size))
1113 __put_seccomp_filter(filter);
1118 #ifdef CONFIG_SYSCTL
1120 /* Human readable action names for friendly sysctl interaction */
1121 #define SECCOMP_RET_KILL_PROCESS_NAME "kill_process"
1122 #define SECCOMP_RET_KILL_THREAD_NAME "kill_thread"
1123 #define SECCOMP_RET_TRAP_NAME "trap"
1124 #define SECCOMP_RET_ERRNO_NAME "errno"
1125 #define SECCOMP_RET_TRACE_NAME "trace"
1126 #define SECCOMP_RET_LOG_NAME "log"
1127 #define SECCOMP_RET_ALLOW_NAME "allow"
1129 static const char seccomp_actions_avail[] =
1130 SECCOMP_RET_KILL_PROCESS_NAME " "
1131 SECCOMP_RET_KILL_THREAD_NAME " "
1132 SECCOMP_RET_TRAP_NAME " "
1133 SECCOMP_RET_ERRNO_NAME " "
1134 SECCOMP_RET_TRACE_NAME " "
1135 SECCOMP_RET_LOG_NAME " "
1136 SECCOMP_RET_ALLOW_NAME;
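/*
 * The concatenation above is the string userspace reads back from
 * /proc/sys/kernel/seccomp/actions_avail:
 * "kill_process kill_thread trap errno trace log allow".
 */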
1138 struct seccomp_log_name {
1143 static const struct seccomp_log_name seccomp_log_names[] = {
1144 { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
1145 { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
1146 { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
1147 { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
1148 { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
1149 { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
1150 { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
1154 static bool seccomp_names_from_actions_logged(char *names, size_t size,
1158 const struct seccomp_log_name *cur;
1159 bool append_sep = false;
1161 for (cur = seccomp_log_names; cur->name && size; cur++) {
1164 if (!(actions_logged & cur->log))
1168 ret = strscpy(names, sep, size);
1177 ret = strscpy(names, cur->name, size);
1188 static bool seccomp_action_logged_from_name(u32 *action_logged,
1191 const struct seccomp_log_name *cur;
1193 for (cur = seccomp_log_names; cur->name; cur++) {
1194 if (!strcmp(cur->name, name)) {
1195 *action_logged = cur->log;
1203 static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
1207 *actions_logged = 0;
1208 while ((name = strsep(&names, " ")) && *name) {
1209 u32 action_logged = 0;
1211 if (!seccomp_action_logged_from_name(&action_logged, name))
1214 *actions_logged |= action_logged;
1220 static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
1221 size_t *lenp, loff_t *ppos)
1223 char names[sizeof(seccomp_actions_avail)];
1224 struct ctl_table table;
1226 memset(names, 0, sizeof(names));
1228 if (!seccomp_names_from_actions_logged(names, sizeof(names),
1229 seccomp_actions_logged, " "))
1234 table.maxlen = sizeof(names);
1235 return proc_dostring(&table, 0, buffer, lenp, ppos);
1238 static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
1239 size_t *lenp, loff_t *ppos, u32 *actions_logged)
1241 char names[sizeof(seccomp_actions_avail)];
1242 struct ctl_table table;
1245 if (!capable(CAP_SYS_ADMIN))
1248 memset(names, 0, sizeof(names));
1252 table.maxlen = sizeof(names);
1253 ret = proc_dostring(&table, 1, buffer, lenp, ppos);
1257 if (!seccomp_actions_logged_from_names(actions_logged, table.data))
1260 if (*actions_logged & SECCOMP_LOG_ALLOW)
1263 seccomp_actions_logged = *actions_logged;
1267 static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
1270 char names[sizeof(seccomp_actions_avail)];
1271 char old_names[sizeof(seccomp_actions_avail)];
1272 const char *new = names;
1273 const char *old = old_names;
1278 memset(names, 0, sizeof(names));
1279 memset(old_names, 0, sizeof(old_names));
1283 else if (!actions_logged)
1285 else if (!seccomp_names_from_actions_logged(names, sizeof(names),
1286 actions_logged, ","))
1289 if (!old_actions_logged)
1291 else if (!seccomp_names_from_actions_logged(old_names,
1293 old_actions_logged, ","))
1296 return audit_seccomp_actions_logged(new, old, !ret);
1299 static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
1300 void __user *buffer, size_t *lenp,
1306 u32 actions_logged = 0;
1307 u32 old_actions_logged = seccomp_actions_logged;
1309 ret = write_actions_logged(ro_table, buffer, lenp, ppos,
1311 audit_actions_logged(actions_logged, old_actions_logged, ret);
1313 ret = read_actions_logged(ro_table, buffer, lenp, ppos);
1318 static struct ctl_path seccomp_sysctl_path[] = {
1319 { .procname = "kernel", },
1320 { .procname = "seccomp", },
1324 static struct ctl_table seccomp_sysctl_table[] = {
1326 .procname = "actions_avail",
1327 .data = (void *) &seccomp_actions_avail,
1328 .maxlen = sizeof(seccomp_actions_avail),
1330 .proc_handler = proc_dostring,
1333 .procname = "actions_logged",
1335 .proc_handler = seccomp_actions_logged_handler,
1340 static int __init seccomp_sysctl_init(void)
1342 struct ctl_table_header *hdr;
1344 hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
1346 pr_warn("seccomp: sysctl registration failed\n");
1348 kmemleak_not_leak(hdr);
1353 device_initcall(seccomp_sysctl_init)
1355 #endif /* CONFIG_SYSCTL */