1 #include "cgroup-internal.h"
3 #include <linux/ctype.h>
4 #include <linux/kmod.h>
5 #include <linux/sort.h>
6 #include <linux/delay.h>
8 #include <linux/sched/signal.h>
9 #include <linux/sched/task.h>
10 #include <linux/magic.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13 #include <linux/delayacct.h>
14 #include <linux/pid_namespace.h>
15 #include <linux/cgroupstats.h>
17 #include <trace/events/cgroup.h>
20 * pidlists linger the following amount before being destroyed. The goal
21 * is avoiding frequent destruction in the middle of consecutive read calls.
22 * Expiring in the middle is a performance problem, not a correctness one.
23 * 1 sec should be enough.
25 #define CGROUP_PIDLIST_DESTROY_DELAY HZ
27 /* Controllers blocked by the command line in v1 */
28 static u16 cgroup_no_v1_mask;
31 * pidlist destructions need to be flushed on cgroup destruction. Use a
32 * separate workqueue as flush domain.
34 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
37 * Protects cgroup_root->release_agent_path. Modifying it also requires
38 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
40 static DEFINE_SPINLOCK(release_agent_path_lock);
42 bool cgroup1_ssid_disabled(int ssid)
44 return cgroup_no_v1_mask & (1 << ssid);
48 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
49 * @from: attach to all cgroups of a given task
50 * @tsk: the task to be attached
52 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
54 struct cgroup_root *root;
57 mutex_lock(&cgroup_mutex);
58 percpu_down_write(&cgroup_threadgroup_rwsem);
60 struct cgroup *from_cgrp;
62 if (root == &cgrp_dfl_root)
65 spin_lock_irq(&css_set_lock);
66 from_cgrp = task_cgroup_from_root(from, root);
67 spin_unlock_irq(&css_set_lock);
69 retval = cgroup_attach_task(from_cgrp, tsk, false);
73 percpu_up_write(&cgroup_threadgroup_rwsem);
74 mutex_unlock(&cgroup_mutex);
78 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
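/*
 * Illustrative usage sketch only: an in-kernel user (a kthread wanting to
 * follow the cgroups of the task that created it, for example) could mirror
 * that task's v1 memberships as below. The helper name is hypothetical and
 * not part of this file.
 */
static inline int example_follow_owner_cgroups(struct task_struct *owner)
{
	return cgroup_attach_task_all(owner, current);
}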
81 * cgroup_transfer_tasks - move tasks from one cgroup to another
82 * @to: cgroup to which the tasks will be moved
83 * @from: cgroup in which the tasks currently reside
85 * Locking rules between cgroup_post_fork() and the migration path
86 * guarantee that, if a task is forking while being migrated, the new child
87 * is guaranteed to be either visible in the source cgroup after the
88 * parent's migration is complete or put into the target cgroup. No task
89 * can slip out of migration through forking.
91 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
93 DEFINE_CGROUP_MGCTX(mgctx);
94 struct cgrp_cset_link *link;
95 struct css_task_iter it;
96 struct task_struct *task;
99 if (cgroup_on_dfl(to))
102 ret = cgroup_migrate_vet_dst(to);
106 mutex_lock(&cgroup_mutex);
108 percpu_down_write(&cgroup_threadgroup_rwsem);
110 /* all tasks in @from are being moved, all csets are source */
111 spin_lock_irq(&css_set_lock);
112 list_for_each_entry(link, &from->cset_links, cset_link)
113 cgroup_migrate_add_src(link->cset, to, &mgctx);
114 spin_unlock_irq(&css_set_lock);
116 ret = cgroup_migrate_prepare_dst(&mgctx);
121 * Migrate tasks one-by-one until @from is empty. This fails iff
122 * ->can_attach() fails.
125 css_task_iter_start(&from->self, 0, &it);
128 task = css_task_iter_next(&it);
129 } while (task && (task->flags & PF_EXITING));
132 get_task_struct(task);
133 css_task_iter_end(&it);
136 ret = cgroup_migrate(task, false, &mgctx);
138 TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
139 put_task_struct(task);
141 } while (task && !ret);
143 cgroup_migrate_finish(&mgctx);
144 percpu_up_write(&cgroup_threadgroup_rwsem);
145 mutex_unlock(&cgroup_mutex);
150 * Stuff for reading the 'tasks'/'procs' files.
152 * Reading this file can return large amounts of data if a cgroup has
153 * *lots* of attached tasks. So it may need several calls to read(),
154 * but we cannot guarantee that the information we produce is correct
155 * unless we produce it entirely atomically.
159 /* which pidlist file are we talking about? */
160 enum cgroup_filetype {
166 * A pidlist is a list of pids that virtually represents the contents of one
167 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
168 * a pair (one each for procs, tasks) for each pid namespace that's relevant to the cgroup.
171 struct cgroup_pidlist {
173 * used to find which pidlist is wanted. doesn't change as long as
174 * this particular list stays in the list.
176 struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
179 /* how many elements the above list has */
181 /* each of these stored in a list by its cgroup */
182 struct list_head links;
183 /* pointer to the cgroup we belong to, for list removal purposes */
184 struct cgroup *owner;
185 /* for delayed destruction */
186 struct delayed_work destroy_dwork;
190 * The following two functions "fix" the issue where there are more pids
191 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
192 * TODO: replace with a kernel-wide solution to this problem
194 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
195 static void *pidlist_allocate(int count)
197 if (PIDLIST_TOO_LARGE(count))
198 return vmalloc(array_size(count, sizeof(pid_t)));
200 return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
203 static void pidlist_free(void *p)
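/*
 * A minimal sketch of the kernel-wide solution the TODO above alludes to:
 * kvmalloc_array() transparently falls back to vmalloc() for large requests
 * and kvfree() releases either kind of allocation, which would make the
 * PIDLIST_TOO_LARGE() special-casing unnecessary. The _kv names are
 * illustrative; nothing here is wired up.
 */
static inline void *pidlist_allocate_kv(int count)
{
	return kvmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}

static inline void pidlist_free_kv(void *p)
{
	kvfree(p);
}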
209 * Used to destroy all pidlists still lingering on the destroy timer. None
210 * should be left afterwards.
212 void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
214 struct cgroup_pidlist *l, *tmp_l;
216 mutex_lock(&cgrp->pidlist_mutex);
217 list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
218 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
219 mutex_unlock(&cgrp->pidlist_mutex);
221 flush_workqueue(cgroup_pidlist_destroy_wq);
222 BUG_ON(!list_empty(&cgrp->pidlists));
225 static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
227 struct delayed_work *dwork = to_delayed_work(work);
228 struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
230 struct cgroup_pidlist *tofree = NULL;
232 mutex_lock(&l->owner->pidlist_mutex);
235 * Destroy iff we didn't get queued again. The state won't change
236 * as destroy_dwork can only be queued while locked.
238 if (!delayed_work_pending(dwork)) {
240 pidlist_free(l->list);
241 put_pid_ns(l->key.ns);
245 mutex_unlock(&l->owner->pidlist_mutex);
250 * pidlist_uniq - given a sorted, kmalloc()ed list, strip out all duplicate entries
251 * Returns the number of unique elements.
253 static int pidlist_uniq(pid_t *list, int length)
258 * we presume the 0th element is unique, so src starts at 1. trivial
259 * edge cases first; no work needs to be done for lengths 0 or 1.
261 if (length == 0 || length == 1)
263 /* src and dest walk down the list; dest counts unique elements */
264 for (src = 1; src < length; src++) {
265 /* find next unique element */
266 while (list[src] == list[src-1]) {
271 /* dest always points to where the next unique element goes */
272 list[dest] = list[src];
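/*
 * Worked example (illustrative): given the sorted input {3, 3, 5, 7, 7, 7},
 * the loop above compacts the array in place to {3, 5, 7, ...} and the
 * function returns 3, the number of unique entries.
 */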
280 * The two pid files - tasks and cgroup.procs - guarantee that the result
281 * is sorted, which forced this whole pidlist fiasco. As pid order is
282 * different per namespace, each namespace needs a differently sorted list,
283 * making it impossible to use, for example, a single rbtree of member tasks
284 * sorted by task pointer. As pidlists can be fairly large, allocating one
285 * per open file is dangerous, so cgroup had to implement a shared pool of
286 * pidlists keyed by cgroup and namespace.
288 static int cmppid(const void *a, const void *b)
290 return *(pid_t *)a - *(pid_t *)b;
293 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
294 enum cgroup_filetype type)
296 struct cgroup_pidlist *l;
297 /* don't need task_nsproxy() if we're looking at ourself */
298 struct pid_namespace *ns = task_active_pid_ns(current);
300 lockdep_assert_held(&cgrp->pidlist_mutex);
302 list_for_each_entry(l, &cgrp->pidlists, links)
303 if (l->key.type == type && l->key.ns == ns)
309 * find the appropriate pidlist for our purpose (given procs vs tasks);
310 * called with cgrp->pidlist_mutex held. Returns the matching pidlist,
311 * creating a new one if necessary, or NULL if we're out of memory.
314 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
315 enum cgroup_filetype type)
317 struct cgroup_pidlist *l;
319 lockdep_assert_held(&cgrp->pidlist_mutex);
321 l = cgroup_pidlist_find(cgrp, type);
325 /* entry not found; create a new one */
326 l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
330 INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
332 /* don't need task_nsproxy() if we're looking at ourself */
333 l->key.ns = get_pid_ns(task_active_pid_ns(current));
335 list_add(&l->links, &cgrp->pidlists);
340 * cgroup_task_count - count the number of tasks in a cgroup.
341 * @cgrp: the cgroup in question
343 int cgroup_task_count(const struct cgroup *cgrp)
346 struct cgrp_cset_link *link;
348 spin_lock_irq(&css_set_lock);
349 list_for_each_entry(link, &cgrp->cset_links, cset_link)
350 count += link->cset->nr_tasks;
351 spin_unlock_irq(&css_set_lock);
356 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
358 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
359 struct cgroup_pidlist **lp)
363 int pid, n = 0; /* used for populating the array */
364 struct css_task_iter it;
365 struct task_struct *tsk;
366 struct cgroup_pidlist *l;
368 lockdep_assert_held(&cgrp->pidlist_mutex);
371 * If the cgroup gets more users after we read the count, we won't have
372 * enough space - tough. This race is indistinguishable to the
373 * caller from the case that the additional cgroup users didn't
374 * show up until sometime later on.
376 length = cgroup_task_count(cgrp);
377 array = pidlist_allocate(length);
380 /* now, populate the array */
381 css_task_iter_start(&cgrp->self, 0, &it);
382 while ((tsk = css_task_iter_next(&it))) {
383 if (unlikely(n == length))
385 /* get tgid or pid for procs or tasks file respectively */
386 if (type == CGROUP_FILE_PROCS)
387 pid = task_tgid_vnr(tsk);
389 pid = task_pid_vnr(tsk);
390 if (pid > 0) /* make sure to only use valid results */
393 css_task_iter_end(&it);
395 /* now sort & (if procs) strip out duplicates */
396 sort(array, length, sizeof(pid_t), cmppid, NULL);
397 if (type == CGROUP_FILE_PROCS)
398 length = pidlist_uniq(array, length);
400 l = cgroup_pidlist_find_create(cgrp, type);
406 /* store array, freeing old if necessary */
407 pidlist_free(l->list);
415 * seq_file methods for the tasks/procs files. The seq_file position is the
416 * next pid to display; the seq_file iterator is a pointer to the pid
417 * in the cgroup->l->list array.
420 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
423 * Initially we receive a position value that corresponds to
424 * one more than the last pid shown (or 0 on the first call or
425 * after a seek to the start). Use a binary search to find the
426 * next pid to display, if any.
428 struct kernfs_open_file *of = s->private;
429 struct cgroup_file_ctx *ctx = of->priv;
430 struct cgroup *cgrp = seq_css(s)->cgroup;
431 struct cgroup_pidlist *l;
432 enum cgroup_filetype type = seq_cft(s)->private;
433 int index = 0, pid = *pos;
436 mutex_lock(&cgrp->pidlist_mutex);
439 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
440 * start() after open. If the matching pidlist is around, we can use
441 * that. Look for it. Note that @ctx->procs1.pidlist can't be used
442 * directly. It could already have been destroyed.
444 if (ctx->procs1.pidlist)
445 ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);
448 * Either this is the first start() after open or the matching
449 * pidlist has been destroyed in between. Create a new one.
451 if (!ctx->procs1.pidlist) {
452 ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
456 l = ctx->procs1.pidlist;
461 while (index < end) {
462 int mid = (index + end) / 2;
463 if (l->list[mid] == pid) {
466 } else if (l->list[mid] <= pid)
472 /* If we're off the end of the array, we're done */
473 if (index >= l->length)
475 /* Update the abstract position to be the actual pid that we found */
476 iter = l->list + index;
481 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
483 struct kernfs_open_file *of = s->private;
484 struct cgroup_file_ctx *ctx = of->priv;
485 struct cgroup_pidlist *l = ctx->procs1.pidlist;
488 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
489 CGROUP_PIDLIST_DESTROY_DELAY);
490 mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
493 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
495 struct kernfs_open_file *of = s->private;
496 struct cgroup_file_ctx *ctx = of->priv;
497 struct cgroup_pidlist *l = ctx->procs1.pidlist;
499 pid_t *end = l->list + l->length;
501 * Advance to the next pid in the array. If this goes off the end, we're done.
514 static int cgroup_pidlist_show(struct seq_file *s, void *v)
516 seq_printf(s, "%d\n", *(int *)v);
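/*
 * Reading "tasks" or "cgroup.procs" therefore yields one pid (or tgid) per
 * line, e.g. (values illustrative):
 *
 *	1201
 *	1203
 *	1345
 */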
521 static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
522 char *buf, size_t nbytes, loff_t off,
526 struct task_struct *task;
527 const struct cred *cred, *tcred;
530 cgrp = cgroup_kn_lock_live(of->kn, false);
534 task = cgroup_procs_write_start(buf, threadgroup);
535 ret = PTR_ERR_OR_ZERO(task);
540 * Even if we're attaching all tasks in the thread group, we only need
541 * to check permissions on one of them. Check permissions using the
542 * credentials from file open to protect against inherited fd attacks.
544 cred = of->file->f_cred;
545 tcred = get_task_cred(task);
546 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
547 !uid_eq(cred->euid, tcred->uid) &&
548 !uid_eq(cred->euid, tcred->suid))
554 ret = cgroup_attach_task(cgrp, task, threadgroup);
557 cgroup_procs_write_finish(task);
559 cgroup_kn_unlock(of->kn);
561 return ret ?: nbytes;
564 static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
565 char *buf, size_t nbytes, loff_t off)
567 return __cgroup1_procs_write(of, buf, nbytes, off, true);
570 static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
571 char *buf, size_t nbytes, loff_t off)
573 return __cgroup1_procs_write(of, buf, nbytes, off, false);
576 static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
577 char *buf, size_t nbytes, loff_t off)
581 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
584 * Release agent gets called with all capabilities,
585 * so require capabilities to set the release agent.
587 if ((of->file->f_cred->user_ns != &init_user_ns) ||
588 !capable(CAP_SYS_ADMIN))
591 cgrp = cgroup_kn_lock_live(of->kn, false);
594 spin_lock(&release_agent_path_lock);
595 strlcpy(cgrp->root->release_agent_path, strstrip(buf),
596 sizeof(cgrp->root->release_agent_path));
597 spin_unlock(&release_agent_path_lock);
598 cgroup_kn_unlock(of->kn);
602 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
604 struct cgroup *cgrp = seq_css(seq)->cgroup;
606 spin_lock(&release_agent_path_lock);
607 seq_puts(seq, cgrp->root->release_agent_path);
608 spin_unlock(&release_agent_path_lock);
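/*
 * "cgroup.sane_behavior" always reads 0 on a v1 hierarchy; the flag only
 * ever meant anything while the unified (v2) behavior was being prototyped
 * behind a development mount option. Kept for compatibility.
 */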
613 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
615 seq_puts(seq, "0\n");
619 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
622 return notify_on_release(css->cgroup);
625 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
626 struct cftype *cft, u64 val)
629 set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
631 clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
635 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
638 return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
641 static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
642 struct cftype *cft, u64 val)
645 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
647 clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
651 /* cgroup core interface files for the legacy hierarchies */
652 struct cftype cgroup1_base_files[] = {
654 .name = "cgroup.procs",
655 .seq_start = cgroup_pidlist_start,
656 .seq_next = cgroup_pidlist_next,
657 .seq_stop = cgroup_pidlist_stop,
658 .seq_show = cgroup_pidlist_show,
659 .private = CGROUP_FILE_PROCS,
660 .write = cgroup1_procs_write,
663 .name = "cgroup.clone_children",
664 .read_u64 = cgroup_clone_children_read,
665 .write_u64 = cgroup_clone_children_write,
668 .name = "cgroup.sane_behavior",
669 .flags = CFTYPE_ONLY_ON_ROOT,
670 .seq_show = cgroup_sane_behavior_show,
674 .seq_start = cgroup_pidlist_start,
675 .seq_next = cgroup_pidlist_next,
676 .seq_stop = cgroup_pidlist_stop,
677 .seq_show = cgroup_pidlist_show,
678 .private = CGROUP_FILE_TASKS,
679 .write = cgroup1_tasks_write,
682 .name = "notify_on_release",
683 .read_u64 = cgroup_read_notify_on_release,
684 .write_u64 = cgroup_write_notify_on_release,
687 .name = "release_agent",
688 .flags = CFTYPE_ONLY_ON_ROOT,
689 .seq_show = cgroup_release_agent_show,
690 .write = cgroup_release_agent_write,
691 .max_write_len = PATH_MAX - 1,
696 /* Display information about each subsystem and each hierarchy */
697 int proc_cgroupstats_show(struct seq_file *m, void *v)
699 struct cgroup_subsys *ss;
702 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
704 * ideally we don't want subsystems moving around while we do this.
705 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
706 * subsys/hierarchy state.
708 mutex_lock(&cgroup_mutex);
710 for_each_subsys(ss, i)
711 seq_printf(m, "%s\t%d\t%d\t%d\n",
712 ss->legacy_name, ss->root->hierarchy_id,
713 atomic_read(&ss->root->nr_cgrps),
714 cgroup_ssid_enabled(i));
716 mutex_unlock(&cgroup_mutex);
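/*
 * For reference, the resulting /proc/cgroups looks roughly like this
 * (values illustrative):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	1	1
 *	cpu	3	8	1
 *	memory	4	56	1
 */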
721 * cgroupstats_build - build and fill cgroupstats
722 * @stats: cgroupstats to fill information into
723 * @dentry: A dentry entry belonging to the cgroup for which stats have been requested
726 * Build and fill cgroupstats so that taskstats can export it to user space.
729 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
731 struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
733 struct css_task_iter it;
734 struct task_struct *tsk;
736 /* it should be a kernfs_node belonging to cgroupfs and must be a directory */
737 if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
738 kernfs_type(kn) != KERNFS_DIR)
741 mutex_lock(&cgroup_mutex);
744 * We aren't being called from kernfs and there's no guarantee on
745 * @kn->priv's validity. For this and css_tryget_online_from_dir(),
746 * @kn->priv is RCU safe. Let's do the RCU dancing.
749 cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
750 if (!cgrp || cgroup_is_dead(cgrp)) {
752 mutex_unlock(&cgroup_mutex);
757 css_task_iter_start(&cgrp->self, 0, &it);
758 while ((tsk = css_task_iter_next(&it))) {
759 switch (tsk->state) {
763 case TASK_INTERRUPTIBLE:
764 stats->nr_sleeping++;
766 case TASK_UNINTERRUPTIBLE:
767 stats->nr_uninterruptible++;
773 if (delayacct_is_task_waiting_on_io(tsk))
778 css_task_iter_end(&it);
780 mutex_unlock(&cgroup_mutex);
784 void cgroup1_check_for_release(struct cgroup *cgrp)
786 if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
787 !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
788 schedule_work(&cgrp->release_agent_work);
792 * Notify userspace when a cgroup is released, by running the
793 * configured release agent with the name of the cgroup (path
794 * relative to the root of cgroup file system) as the argument.
796 * Most likely, this user command will try to rmdir this cgroup.
798 * This races with the possibility that some other task will be
799 * attached to this cgroup before it is removed, or that some other
800 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
801 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
802 * unused, and this cgroup will be reprieved from its death sentence,
803 * to continue to serve a useful existence. Next time it's released,
804 * we will get notified again, if it still has 'notify_on_release' set.
806 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
807 * means only wait until the task is successfully execve()'d. The
808 * separate release agent task is forked by call_usermodehelper(),
809 * then control in this thread returns here, without waiting for the
810 * release agent task. We don't bother to wait because the caller of
811 * this routine has no use for the exit status of the release agent
812 * task, so no sense holding our caller up for that.
814 void cgroup1_release_agent(struct work_struct *work)
816 struct cgroup *cgrp =
817 container_of(work, struct cgroup, release_agent_work);
818 char *pathbuf = NULL, *agentbuf = NULL;
819 char *argv[3], *envp[3];
822 mutex_lock(&cgroup_mutex);
824 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
825 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
826 if (!pathbuf || !agentbuf || !strlen(agentbuf))
829 spin_lock_irq(&css_set_lock);
830 ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
831 spin_unlock_irq(&css_set_lock);
832 if (ret < 0 || ret >= PATH_MAX)
839 /* minimal command environment */
841 envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
844 mutex_unlock(&cgroup_mutex);
845 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
848 mutex_unlock(&cgroup_mutex);
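/*
 * For illustration, with release_agent set to /sbin/cgroup_release and a
 * cgroup "jobs/batch1" being released, the helper ends up invoked roughly as:
 *
 *	argv: { "/sbin/cgroup_release", "/jobs/batch1", NULL }
 *	envp: { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL }
 *
 * The paths are made up; only the argv/envp shape matters.
 */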
855 * cgroup1_rename - Only allow simple rename of directories in place.
857 static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
858 const char *new_name_str)
860 struct cgroup *cgrp = kn->priv;
863 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
864 if (strchr(new_name_str, '\n'))
867 if (kernfs_type(kn) != KERNFS_DIR)
869 if (kn->parent != new_parent)
873 * We're gonna grab cgroup_mutex which nests outside kernfs
874 * active_ref. kernfs_rename() doesn't require active_ref
875 * protection. Break them before grabbing cgroup_mutex.
877 kernfs_break_active_protection(new_parent);
878 kernfs_break_active_protection(kn);
880 mutex_lock(&cgroup_mutex);
882 ret = kernfs_rename(kn, new_parent, new_name_str);
884 TRACE_CGROUP_PATH(rename, cgrp);
886 mutex_unlock(&cgroup_mutex);
888 kernfs_unbreak_active_protection(kn);
889 kernfs_unbreak_active_protection(new_parent);
893 static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
895 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
896 struct cgroup_subsys *ss;
899 for_each_subsys(ss, ssid)
900 if (root->subsys_mask & (1 << ssid))
901 seq_show_option(seq, ss->legacy_name, NULL);
902 if (root->flags & CGRP_ROOT_NOPREFIX)
903 seq_puts(seq, ",noprefix");
904 if (root->flags & CGRP_ROOT_XATTR)
905 seq_puts(seq, ",xattr");
906 if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
907 seq_puts(seq, ",cpuset_v2_mode");
909 spin_lock(&release_agent_path_lock);
910 if (strlen(root->release_agent_path))
911 seq_show_option(seq, "release_agent",
912 root->release_agent_path);
913 spin_unlock(&release_agent_path_lock);
915 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
916 seq_puts(seq, ",clone_children");
917 if (strlen(root->name))
918 seq_show_option(seq, "name", root->name);
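/*
 * For illustration, a v1 hierarchy mounted with the memory controller
 * typically shows up in /proc/mounts as something like:
 *
 *	cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
 */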
922 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
924 char *token, *o = data;
925 bool all_ss = false, one_ss = false;
927 struct cgroup_subsys *ss;
931 #ifdef CONFIG_CPUSETS
932 mask = ~((u16)1 << cpuset_cgrp_id);
935 memset(opts, 0, sizeof(*opts));
937 while ((token = strsep(&o, ",")) != NULL) {
942 if (!strcmp(token, "none")) {
943 /* Explicitly have no subsystems */
947 if (!strcmp(token, "all")) {
948 /* Mutually exclusive option 'all' + subsystem name */
954 if (!strcmp(token, "noprefix")) {
955 opts->flags |= CGRP_ROOT_NOPREFIX;
958 if (!strcmp(token, "clone_children")) {
959 opts->cpuset_clone_children = true;
962 if (!strcmp(token, "cpuset_v2_mode")) {
963 opts->flags |= CGRP_ROOT_CPUSET_V2_MODE;
966 if (!strcmp(token, "xattr")) {
967 opts->flags |= CGRP_ROOT_XATTR;
970 if (!strncmp(token, "release_agent=", 14)) {
971 /* Specifying two release agents is forbidden */
972 if (opts->release_agent)
974 opts->release_agent =
975 kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
976 if (!opts->release_agent)
980 if (!strncmp(token, "name=", 5)) {
981 const char *name = token + 5;
982 /* Can't specify an empty name */
985 /* Must match [\w.-]+ */
986 for (i = 0; i < strlen(name); i++) {
990 if ((c == '.') || (c == '-') || (c == '_'))
994 /* Specifying two names is forbidden */
997 opts->name = kstrndup(name,
998 MAX_CGROUP_ROOT_NAMELEN - 1,
1006 for_each_subsys(ss, i) {
1007 if (strcmp(token, ss->legacy_name))
1009 if (!cgroup_ssid_enabled(i))
1011 if (cgroup1_ssid_disabled(i))
1014 /* Mutually exclusive option 'all' + subsystem name */
1017 opts->subsys_mask |= (1 << i);
1022 if (i == CGROUP_SUBSYS_COUNT)
1027 * If the 'all' option was specified select all the subsystems,
1028 * otherwise, if none of 'none', 'name=' or a subsystem name was
1029 * specified, default to 'all'.
1031 if (all_ss || (!one_ss && !opts->none && !opts->name))
1032 for_each_subsys(ss, i)
1033 if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
1034 opts->subsys_mask |= (1 << i);
1037 * We either have to specify by name or by subsystems. (So all
1038 * empty hierarchies must have a name).
1040 if (!opts->subsys_mask && !opts->name)
1044 * Option noprefix was introduced just for backward compatibility
1045 * with the old cpuset, so we allow noprefix only if mounting just
1046 * the cpuset subsystem.
1048 if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
1051 /* Can't specify "none" and some subsystems */
1052 if (opts->subsys_mask && opts->none)
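/*
 * Illustrative userspace mount invocations that exercise the options parsed
 * above:
 *
 *	mount -t cgroup -o cpuset,noprefix cpuset /dev/cpuset
 *	mount -t cgroup -o none,name=systemd none /sys/fs/cgroup/systemd
 */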
1058 static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
1061 struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1062 struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
1063 struct cgroup_sb_opts opts;
1064 u16 added_mask, removed_mask;
1066 cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1068 /* See what subsystems are wanted */
1069 ret = parse_cgroupfs_options(data, &opts);
1073 if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
1074 pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1075 task_tgid_nr(current), current->comm);
1076 /* See cgroup1_mount release_agent handling */
1077 if (opts.release_agent &&
1078 ((ns->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))) {
1083 added_mask = opts.subsys_mask & ~root->subsys_mask;
1084 removed_mask = root->subsys_mask & ~opts.subsys_mask;
1086 /* Don't allow flags or name to change at remount */
1087 if ((opts.flags ^ root->flags) ||
1088 (opts.name && strcmp(opts.name, root->name))) {
1089 pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
1090 opts.flags, opts.name ?: "", root->flags, root->name);
1095 /* remounting is not allowed for populated hierarchies */
1096 if (!list_empty(&root->cgrp.self.children)) {
1101 ret = rebind_subsystems(root, added_mask);
1105 WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
1107 if (opts.release_agent) {
1108 spin_lock(&release_agent_path_lock);
1109 strcpy(root->release_agent_path, opts.release_agent);
1110 spin_unlock(&release_agent_path_lock);
1113 trace_cgroup_remount(root);
1116 kfree(opts.release_agent);
1118 mutex_unlock(&cgroup_mutex);
1122 struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
1123 .rename = cgroup1_rename,
1124 .show_options = cgroup1_show_options,
1125 .remount_fs = cgroup1_remount,
1126 .mkdir = cgroup_mkdir,
1127 .rmdir = cgroup_rmdir,
1128 .show_path = cgroup_show_path,
1131 struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
1132 void *data, unsigned long magic,
1133 struct cgroup_namespace *ns)
1135 struct super_block *pinned_sb = NULL;
1136 struct cgroup_sb_opts opts;
1137 struct cgroup_root *root;
1138 struct cgroup_subsys *ss;
1139 struct dentry *dentry;
1141 bool new_root = false;
1143 cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1145 /* First find the desired set of subsystems */
1146 ret = parse_cgroupfs_options(data, &opts);
1151 * Destruction of cgroup root is asynchronous, so subsystems may
1152 * still be dying after the previous unmount. Let's drain the
1153 * dying subsystems. We just need to ensure that the ones
1154 * unmounted previously finish dying and don't care about new ones
1155 * starting. Testing ref liveness is good enough.
1157 for_each_subsys(ss, i) {
1158 if (!(opts.subsys_mask & (1 << i)) ||
1159 ss->root == &cgrp_dfl_root)
1162 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
1163 mutex_unlock(&cgroup_mutex);
1165 ret = restart_syscall();
1168 cgroup_put(&ss->root->cgrp);
1171 for_each_root(root) {
1172 bool name_match = false;
1174 if (root == &cgrp_dfl_root)
1178 * If we asked for a name then it must match. Also, if
1179 * name matches but subsys_mask doesn't, we should fail.
1180 * Remember whether name matched.
1183 if (strcmp(opts.name, root->name))
1189 * If we asked for subsystems (or explicitly for no
1190 * subsystems) then they must match.
1192 if ((opts.subsys_mask || opts.none) &&
1193 (opts.subsys_mask != root->subsys_mask)) {
1200 if (root->flags ^ opts.flags)
1201 pr_warn("new mount options do not match the existing superblock, will be ignored\n");
1204 * We want to reuse @root whose lifetime is governed by its
1205 * ->cgrp. Let's check whether @root is alive and keep it
1206 * that way. As cgroup_kill_sb() can happen anytime, we
1207 * want to block it by pinning the sb so that @root doesn't
1208 * get killed before mount is complete.
1210 * With the sb pinned, tryget_live can reliably indicate
1211 * whether @root can be reused. If it's being killed,
1212 * drain it. We can use wait_queue for the wait but this
1213 * path is super cold. Let's just sleep a bit and retry.
1215 pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
1216 if (IS_ERR(pinned_sb) ||
1217 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
1218 mutex_unlock(&cgroup_mutex);
1219 if (!IS_ERR_OR_NULL(pinned_sb))
1220 deactivate_super(pinned_sb);
1222 ret = restart_syscall();
1231 * No such thing, create a new one. name= matching without subsys
1232 * specification is allowed for already existing hierarchies but we
1233 * can't create a new one without a subsys specification.
1235 if (!opts.subsys_mask && !opts.none) {
1240 /* Hierarchies may only be created in the initial cgroup namespace. */
1241 if (ns != &init_cgroup_ns) {
1246 * Release agent gets called with all capabilities,
1247 * so require capabilities to set the release agent.
1249 if (opts.release_agent &&
1250 ((ns->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))) {
1255 root = kzalloc(sizeof(*root), GFP_KERNEL);
1262 init_cgroup_root(root, &opts);
1264 ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
1266 cgroup_free_root(root);
1269 mutex_unlock(&cgroup_mutex);
1271 kfree(opts.release_agent);
1275 return ERR_PTR(ret);
1277 dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
1278 CGROUP_SUPER_MAGIC, ns);
1281 * There's a race window after we release cgroup_mutex and before
1282 * allocating a superblock. Make sure a concurrent process won't
1283 * be able to re-use the root during this window by delaying the
1284 * initialization of root refcnt.
1287 mutex_lock(&cgroup_mutex);
1288 percpu_ref_reinit(&root->cgrp.self.refcnt);
1289 mutex_unlock(&cgroup_mutex);
1293 * If @pinned_sb, we're reusing an existing root and holding an
1294 * extra ref on its sb. Mount is complete. Put the extra ref.
1297 deactivate_super(pinned_sb);
1302 static int __init cgroup1_wq_init(void)
1305 * Used to destroy pidlists; kept separate so it can serve as the flush
1306 * domain. Cap @max_active at 1 too.
1308 cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
1310 BUG_ON(!cgroup_pidlist_destroy_wq);
1313 core_initcall(cgroup1_wq_init);
1315 static int __init cgroup_no_v1(char *str)
1317 struct cgroup_subsys *ss;
1321 while ((token = strsep(&str, ",")) != NULL) {
1325 if (!strcmp(token, "all")) {
1326 cgroup_no_v1_mask = U16_MAX;
1330 for_each_subsys(ss, i) {
1331 if (strcmp(token, ss->name) &&
1332 strcmp(token, ss->legacy_name))
1335 cgroup_no_v1_mask |= 1 << i;
1340 __setup("cgroup_no_v1=", cgroup_no_v1);
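/*
 * Example kernel command line usage (illustrative):
 *
 *	cgroup_no_v1=memory,blkio	- keep these controllers off v1 hierarchies
 *	cgroup_no_v1=all		- disable all controllers for v1
 */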