1 /* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../kselftest.h"
#include "cgroup_util.h"
21 * A, B and C's "populated" fields would be 1 while D's 0.
22 * test that after the one process in C is moved to root,
23 * A,B and C's "populated" fields would flip to "0" and file
24 * modified events will be generated on the
25 * "cgroup.events" files of both cgroups.
27 static int test_cgcore_populated(const char *root)
30 char *cg_test_a = NULL, *cg_test_b = NULL;
31 char *cg_test_c = NULL, *cg_test_d = NULL;
33 cg_test_a = cg_name(root, "cg_test_a");
34 cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
35 cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
36 cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");
38 if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
41 if (cg_create(cg_test_a))
44 if (cg_create(cg_test_b))
47 if (cg_create(cg_test_c))
50 if (cg_create(cg_test_d))
53 if (cg_enter_current(cg_test_c))
56 if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
59 if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
62 if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
65 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
68 if (cg_enter_current(root))
71 if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
74 if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
77 if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
80 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
87 cg_destroy(cg_test_d);
89 cg_destroy(cg_test_c);
91 cg_destroy(cg_test_b);
93 cg_destroy(cg_test_a);
102 * A (domain threaded) - B (threaded) - C (domain)
104 * test that C can't be used until it is turned into a
105 * threaded cgroup. "cgroup.type" file will report "domain (invalid)" in
106 * these cases. Operations which fail due to invalid topology use
107 * EOPNOTSUPP as the errno.
109 static int test_cgcore_invalid_domain(const char *root)
112 char *grandparent = NULL, *parent = NULL, *child = NULL;
114 grandparent = cg_name(root, "cg_test_grandparent");
115 parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
116 child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
117 if (!parent || !child || !grandparent)
120 if (cg_create(grandparent))
123 if (cg_create(parent))
126 if (cg_create(child))
129 if (cg_write(parent, "cgroup.type", "threaded"))
132 if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
135 if (!cg_enter_current(child))
138 if (errno != EOPNOTSUPP)
144 cg_enter_current(root);
150 cg_destroy(grandparent);
158 * Test that when a child becomes threaded
159 * the parent type becomes domain threaded.
161 static int test_cgcore_parent_becomes_threaded(const char *root)
164 char *parent = NULL, *child = NULL;
166 parent = cg_name(root, "cg_test_parent");
167 child = cg_name(root, "cg_test_parent/cg_test_child");
168 if (!parent || !child)
171 if (cg_create(parent))
174 if (cg_create(child))
177 if (cg_write(child, "cgroup.type", "threaded"))
180 if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
197 * Test that there's no internal process constrain on threaded cgroups.
198 * You can add threads/processes on a parent with a controller enabled.
200 static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
203 char *parent = NULL, *child = NULL;
205 if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
206 cg_write(root, "cgroup.subtree_control", "+cpu")) {
211 parent = cg_name(root, "cg_test_parent");
212 child = cg_name(root, "cg_test_parent/cg_test_child");
213 if (!parent || !child)
216 if (cg_create(parent))
219 if (cg_create(child))
222 if (cg_write(parent, "cgroup.type", "threaded"))
225 if (cg_write(child, "cgroup.type", "threaded"))
228 if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
231 if (cg_enter_current(parent))
237 cg_enter_current(root);
238 cg_enter_current(root);
249 * Test that you can't enable a controller on a child if it's not enabled
252 static int test_cgcore_top_down_constraint_enable(const char *root)
255 char *parent = NULL, *child = NULL;
257 parent = cg_name(root, "cg_test_parent");
258 child = cg_name(root, "cg_test_parent/cg_test_child");
259 if (!parent || !child)
262 if (cg_create(parent))
265 if (cg_create(child))
268 if (!cg_write(child, "cgroup.subtree_control", "+memory"))
284 * Test that you can't disable a controller on a parent
285 * if it's enabled in a child.
287 static int test_cgcore_top_down_constraint_disable(const char *root)
290 char *parent = NULL, *child = NULL;
292 parent = cg_name(root, "cg_test_parent");
293 child = cg_name(root, "cg_test_parent/cg_test_child");
294 if (!parent || !child)
297 if (cg_create(parent))
300 if (cg_create(child))
303 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
306 if (cg_write(child, "cgroup.subtree_control", "+memory"))
309 if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
325 * Test internal process constraint.
326 * You can't add a pid to a domain parent if a controller is enabled.
328 static int test_cgcore_internal_process_constraint(const char *root)
331 char *parent = NULL, *child = NULL;
333 parent = cg_name(root, "cg_test_parent");
334 child = cg_name(root, "cg_test_parent/cg_test_child");
335 if (!parent || !child)
338 if (cg_create(parent))
341 if (cg_create(child))
344 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
347 if (!cg_enter_current(parent))
363 * cgroup migration permission check should be performed based on the
364 * credentials at the time of open instead of write.
366 static int test_cgcore_lesser_euid_open(const char *root)
368 const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
370 char *cg_test_a = NULL, *cg_test_b = NULL;
371 char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
372 int cg_test_b_procs_fd = -1;
375 cg_test_a = cg_name(root, "cg_test_a");
376 cg_test_b = cg_name(root, "cg_test_b");
378 if (!cg_test_a || !cg_test_b)
381 cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
382 cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
384 if (!cg_test_a_procs || !cg_test_b_procs)
387 if (cg_create(cg_test_a) || cg_create(cg_test_b))
390 if (cg_enter_current(cg_test_a))
393 if (chown(cg_test_a_procs, test_euid, -1) ||
394 chown(cg_test_b_procs, test_euid, -1))
397 saved_uid = geteuid();
398 if (seteuid(test_euid))
401 cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);
403 if (seteuid(saved_uid))
406 if (cg_test_b_procs_fd < 0)
409 if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
415 cg_enter_current(root);
416 if (cg_test_b_procs_fd >= 0)
417 close(cg_test_b_procs_fd);
419 cg_destroy(cg_test_b);
421 cg_destroy(cg_test_a);
422 free(cg_test_b_procs);
423 free(cg_test_a_procs);
/* argument/result block shared with the clone()d child below */
struct lesser_ns_open_thread_arg {
	const char	*path;	/* cgroup.procs path to open */
	int		fd;	/* resulting fd, -1 on failure */
	int		err;	/* errno captured when open failed */
};

/* runs in a child cgroup namespace; opens the path and records the result */
static int lesser_ns_open_thread_fn(void *arg)
{
	struct lesser_ns_open_thread_arg *targ = arg;

	targ->fd = open(targ->path, O_RDWR);
	targ->err = targ->fd < 0 ? errno : 0;
	return 0;
}
445 * cgroup migration permission check should be performed based on the cgroup
446 * namespace at the time of open instead of write.
448 static int test_cgcore_lesser_ns_open(const char *root)
450 static char stack[65536];
451 const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
453 char *cg_test_a = NULL, *cg_test_b = NULL;
454 char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
455 int cg_test_b_procs_fd = -1;
456 struct lesser_ns_open_thread_arg targ = { .fd = -1 };
460 cg_test_a = cg_name(root, "cg_test_a");
461 cg_test_b = cg_name(root, "cg_test_b");
463 if (!cg_test_a || !cg_test_b)
466 cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
467 cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");
469 if (!cg_test_a_procs || !cg_test_b_procs)
472 if (cg_create(cg_test_a) || cg_create(cg_test_b))
475 if (cg_enter_current(cg_test_b))
478 if (chown(cg_test_a_procs, test_euid, -1) ||
479 chown(cg_test_b_procs, test_euid, -1))
482 targ.path = cg_test_b_procs;
483 pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
484 CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
489 if (waitpid(pid, &status, 0) < 0)
492 if (!WIFEXITED(status))
495 cg_test_b_procs_fd = targ.fd;
496 if (cg_test_b_procs_fd < 0)
499 if (cg_enter_current(cg_test_a))
502 if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
508 cg_enter_current(root);
509 if (cg_test_b_procs_fd >= 0)
510 close(cg_test_b_procs_fd);
512 cg_destroy(cg_test_b);
514 cg_destroy(cg_test_a);
515 free(cg_test_b_procs);
516 free(cg_test_a_procs);
522 #define T(x) { x, #x }
524 int (*fn)(const char *root);
527 T(test_cgcore_internal_process_constraint),
528 T(test_cgcore_top_down_constraint_enable),
529 T(test_cgcore_top_down_constraint_disable),
530 T(test_cgcore_no_internal_process_constraint_on_threads),
531 T(test_cgcore_parent_becomes_threaded),
532 T(test_cgcore_invalid_domain),
533 T(test_cgcore_populated),
534 T(test_cgcore_lesser_euid_open),
535 T(test_cgcore_lesser_ns_open),
539 int main(int argc, char *argv[])
542 int i, ret = EXIT_SUCCESS;
544 if (cg_find_unified_root(root, sizeof(root)))
545 ksft_exit_skip("cgroup v2 isn't mounted\n");
547 if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
548 if (cg_write(root, "cgroup.subtree_control", "+memory"))
549 ksft_exit_skip("Failed to set memory controller\n");
551 for (i = 0; i < ARRAY_SIZE(tests); i++) {
552 switch (tests[i].fn(root)) {
554 ksft_test_result_pass("%s\n", tests[i].name);
557 ksft_test_result_skip("%s\n", tests[i].name);
561 ksft_test_result_fail("%s\n", tests[i].name);