// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>
#include <linux/cpu.h>

#include <trace/events/cgroup.h>

#define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cpus_read_lock();
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);

out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

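/*
 * Delayed-work callback that performs the actual pidlist destruction.
 * It frees the pidlist only if nobody re-queued the work while the
 * destroy delay was running; otherwise the list stays cached for reuse.
 */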
static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so i starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

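/*
 * Look up an existing pidlist on @cgrp matching @type and the caller's
 * pid namespace.  Returns NULL if no matching pidlist is cached.
 */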
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open.  If the matching pidlist is around, we can use
	 * that.  Look for it.  Note that @ctx->procs1.pidlist can't be used
	 * directly.  It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		/* binary-search for the first entry >= @pid */
		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);
	return 0;
}

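/*
 * Common write path for the "cgroup.procs" and "tasks" files.
 * @threadgroup selects whether the whole thread group or just the named
 * task gets migrated.
 */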
static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only need
	 * to check permissions on one of them.  Check permissions using the
	 * credentials from file open to protect against inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * Release agent gets called with all capabilities,
	 * require capabilities to set release agent.
	 */
	if ((of->file->f_cred->user_ns != &init_user_ns) ||
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

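/* Simple flag accessors backing the notify_on_release and clone_children files */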
static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf || !strlen(agentbuf))
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

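/* Emit the active mount options of a cgroup1 hierarchy for /proc/mounts */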
static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

static const struct fs_parameter_spec cgroup1_param_specs[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};

const struct fs_parameter_description cgroup1_fs_parameters = {
	.name = "cgroup1",
	.specs = cgroup1_param_specs,
};

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			if (param->type != fs_value_is_string)
				return invalf(fc, "Non-string source");
			if (fc->source)
				return invalf(fc, "Multiple sources not supported");
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalf(fc, "Disabled controller '%s'",
					      param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch ((enum cgroup1_param)opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return cg_invalf(fc, "cgroup1: release_agent respecified");
		/*
		 * Release agent gets called with all capabilities,
		 * require capabilities to set release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return cg_invalf(fc, "cgroup1: Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return cg_invalf(fc, "cgroup1: Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return cg_invalf(fc, "cgroup1: Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return cg_invalf(fc, "cgroup1: Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return cg_invalf(fc, "cgroup1: name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

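/*
 * Validate the option combination collected by cgroup1_parse_param():
 * apply the implicit 'all' default and reject mutually exclusive or
 * meaningless combinations.
 */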
static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return cg_invalf(fc, "cgroup1: Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return cg_invalf(fc, "cgroup1: noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return cg_invalf(fc, "cgroup1: none used incorrectly");

	return 0;
}

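/*
 * Remount handler for cgroup1 hierarchies.  Only rebinding subsystems on
 * an unpopulated hierarchy and updating release_agent are honored; flag
 * and name changes are rejected.
 */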
int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			  ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and positive - in case when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return cg_invalf(fc, "cgroup1: No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

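/*
 * Mount entry point proper.  Picks or creates the cgroup_root via
 * cgroup1_root_to_use() and restarts the syscall if the candidate root
 * is still dying from a previous unmount.
 */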
int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		struct super_block *sb = fc->root->d_sb;

		dput(fc->root);
		deactivate_locked_super(sb);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

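/* Parse the cgroup_no_v1= boot option: "all", "named" or controller names */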
static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);