1 /* SPDX-License-Identifier: GPL-2.0 */
3 #include <linux/limits.h>
15 #include "../kselftest.h"
16 #include "cgroup_util.h"
/*
 * Fault in and dirty the first 'size' bytes of 'buf' by filling them with
 * data read from /dev/urandom, forcing the kernel to commit anonymous
 * pages for the buffer.
 * NOTE(review): the fd validity check, the loop advancing 'pos' by each
 * read() result, and the close()/return paths are elided from this excerpt.
 */
18 static int touch_anon(char *buf, size_t size)
23 fd = open("/dev/urandom", O_RDONLY);
/* read() may return short counts; presumably looped until 'size' bytes — confirm against full source */
28 ssize_t ret = read(fd, pos, size);
/*
 * Child-process body for cg_run_nowait(): mmap an anonymous region of
 * 'arg' bytes, touch every page via touch_anon(), then spin until the
 * parent exits (getppid() changes) so the process stays alive inside the
 * cgroup until it is killed externally.
 * NOTE(review): 'buf' and 'ppid' declarations plus error/return paths are
 * elided from this excerpt.
 */
45 static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
/* size is smuggled through the void* callback argument */
48 size_t size = (size_t)arg;
51 buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
53 if (buf == MAP_FAILED)
56 if (touch_anon((char *)buf, size)) {
/* park here: loop breaks once the original parent is gone */
61 while (getppid() == ppid)
69 * Create a child process that allocates and touches 100MB, then waits to be
70 * killed. Wait until the child is attached to the cgroup, kill all processes
71 * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
72 * destroy the empty cgroup. The test helps detect race conditions between
73 * dying processes leaving the cgroup and cgroup destruction path.
/* See the block comment above for the scenario this test exercises. */
75 static int test_cgcore_destroy(const char *root)
82 cg_test = cg_name(root, "cg_test");
/* repeat several times to give the kill/rmdir race a chance to trigger */
87 for (int i = 0; i < 10; i++) {
88 if (cg_create(cg_test))
/* spawn the 100MB-touching child; it stays in the cgroup until killed */
91 child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
97 /* wait for the child to enter cgroup */
98 if (cg_wait_for_proc_count(cg_test, 1))
101 if (cg_killall(cg_test))
104 /* wait for cgroup to be empty */
/* NOTE(review): presumably polled until "cgroup.procs" reads empty — loop body elided */
106 if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
/* reap the killed child so it doesn't linger as a zombie */
116 if (waitpid(child_pid, NULL, 0) < 0)
131  * A, B and C's "populated" fields would be 1 while D's would be 0.
132  * Test that after the one process in C is moved to root,
133  * A, B and C's "populated" fields would flip to "0" and file
134  * modified events will be generated on the
135  * "cgroup.events" files of each affected cgroup.
/* See the block comment above; also covers cloning directly into a cgroup. */
137 static int test_cgcore_populated(const char *root)
141 char *cg_test_a = NULL, *cg_test_b = NULL;
142 char *cg_test_c = NULL, *cg_test_d = NULL;
/* sentinel for "fd not opened yet"; closed in the (elided) cleanup path */
143 int cgroup_fd = -EBADF;
/* hierarchy: a/b/{c,d} — C and D are sibling leaves under B */
146 cg_test_a = cg_name(root, "cg_test_a");
147 cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
148 cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
149 cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");
151 if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
154 if (cg_create(cg_test_a))
157 if (cg_create(cg_test_b))
160 if (cg_create(cg_test_c))
163 if (cg_create(cg_test_d))
/* put the current process in C: A, B and C become populated, D stays empty */
166 if (cg_enter_current(cg_test_c))
169 if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
172 if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
175 if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
178 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
/* move back to root: every test cgroup must flip to populated 0 */
181 if (cg_enter_current(root))
184 if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
187 if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
190 if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
193 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
196 /* Test that we can directly clone into a new cgroup. */
197 cgroup_fd = dirfd_open_opath(cg_test_d);
201 pid = clone_into_cgroup(cgroup_fd);
/* child placed in D via CLONE_INTO_CGROUP; D should now read populated 1 */
214 err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");
/* child is initially stopped (presumably raises SIGSTOP — confirm against
 * full source); reap the stop, resume it, then reap its exit */
216 (void)clone_reap(pid, WSTOPPED);
217 (void)kill(pid, SIGCONT);
218 (void)clone_reap(pid, WEXITED);
/* with the cloned child gone, D must be empty again */
223 if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
/* destroy D, then verify cloning into the stale dirfd fails cleanly */
228 cg_destroy(cg_test_d);
233 pid = clone_into_cgroup(cgroup_fd);
238 (void)clone_reap(pid, WEXITED);
/* cleanup: tear down leaves before parents */
246 cg_destroy(cg_test_d);
248 cg_destroy(cg_test_c);
250 cg_destroy(cg_test_b);
252 cg_destroy(cg_test_a);
263 * A (domain threaded) - B (threaded) - C (domain)
265  * Test that C can't be used until it is turned into a
266 * threaded cgroup. "cgroup.type" file will report "domain (invalid)" in
267 * these cases. Operations which fail due to invalid topology use
268 * EOPNOTSUPP as the errno.
/* See the block comment above for the invalid-topology scenario. */
270 static int test_cgcore_invalid_domain(const char *root)
273 char *grandparent = NULL, *parent = NULL, *child = NULL;
275 grandparent = cg_name(root, "cg_test_grandparent");
276 parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
277 child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
278 if (!parent || !child || !grandparent)
281 if (cg_create(grandparent))
284 if (cg_create(parent))
287 if (cg_create(child))
/* making the parent threaded invalidates its still-domain child */
290 if (cg_write(parent, "cgroup.type", "threaded"))
293 if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
/* attaching to an invalid-domain cgroup must FAIL... */
296 if (!cg_enter_current(child))
/* ...and must fail specifically with EOPNOTSUPP */
299 if (errno != EOPNOTSUPP)
/* same expectation for CLONE_INTO_CGROUP */
302 if (!clone_into_cgroup_run_wait(child))
308 if (errno != EOPNOTSUPP)
/* cleanup (elided labels): leave the hierarchy, then destroy it */
315 cg_enter_current(root);
321 cg_destroy(grandparent);
329 * Test that when a child becomes threaded
330 * the parent type becomes domain threaded.
/* See the block comment above. */
332 static int test_cgcore_parent_becomes_threaded(const char *root)
335 char *parent = NULL, *child = NULL;
337 parent = cg_name(root, "cg_test_parent")
338 child = cg_name(root, "cg_test_parent/cg_test_child");
339 if (!parent || !child)
342 if (cg_create(parent))
345 if (cg_create(child))
/* flipping the child to threaded should implicitly promote the parent */
348 if (cg_write(child, "cgroup.type", "threaded"))
351 if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
368  * Test that there's no internal process constraint on threaded cgroups.
369 * You can add threads/processes on a parent with a controller enabled.
/* See the block comment above. */
371 static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
374 char *parent = NULL, *child = NULL;
/* skip unless the cpu controller is available and enabled on the root */
376 if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
377 cg_write(root, "cgroup.subtree_control", "+cpu")) {
382 parent = cg_name(root, "cg_test_parent");
383 child = cg_name(root, "cg_test_parent/cg_test_child");
384 if (!parent || !child)
387 if (cg_create(parent))
390 if (cg_create(child))
393 if (cg_write(parent, "cgroup.type", "threaded"))
396 if (cg_write(child, "cgroup.type", "threaded"))
/* enabling a controller on a threaded parent must not block attachment */
399 if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
402 if (cg_enter_current(parent))
/* cleanup (elided labels); called twice — presumably on consecutive
 * error-label lines in the full source, confirm before touching */
408 cg_enter_current(root);
409 cg_enter_current(root);
420 * Test that you can't enable a controller on a child if it's not enabled
/* See the block comment above. */
423 static int test_cgcore_top_down_constraint_enable(const char *root)
426 char *parent = NULL, *child = NULL;
428 parent = cg_name(root, "cg_test_parent");
429 child = cg_name(root, "cg_test_parent/cg_test_child");
430 if (!parent || !child)
433 if (cg_create(parent))
436 if (cg_create(child))
/* memory is NOT enabled in the parent's subtree_control, so enabling it
 * on the child must fail — note the inverted (!) check */
439 if (!cg_write(child, "cgroup.subtree_control", "+memory"))
455 * Test that you can't disable a controller on a parent
456 * if it's enabled in a child.
/* See the block comment above. */
458 static int test_cgcore_top_down_constraint_disable(const char *root)
461 char *parent = NULL, *child = NULL;
463 parent = cg_name(root, "cg_test_parent");
464 child = cg_name(root, "cg_test_parent/cg_test_child");
465 if (!parent || !child)
468 if (cg_create(parent))
471 if (cg_create(child))
468 /* enable memory down the chain, then try to yank it from the parent */
474 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
477 if (cg_write(child, "cgroup.subtree_control", "+memory"))
/* disabling in the parent while the child still uses it must fail (!) */
480 if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
496 * Test internal process constraint.
497 * You can't add a pid to a domain parent if a controller is enabled.
/* See the block comment above. */
499 static int test_cgcore_internal_process_constraint(const char *root)
502 char *parent = NULL, *child = NULL;
504 parent = cg_name(root, "cg_test_parent");
505 child = cg_name(root, "cg_test_parent/cg_test_child");
506 if (!parent || !child)
509 if (cg_create(parent))
512 if (cg_create(child))
515 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
/* parent now has a controller delegated to children, so attaching a
 * process (or cloning into it) must fail — both checks are inverted */
518 if (!cg_enter_current(parent))
521 if (!clone_into_cgroup_run_wait(parent))
/*
 * pthread body that just blocks in pause() until cancelled; the return
 * value is irrelevant (joiners discard it).
 */
536 static void *dummy_thread_fn(void *arg)
538 return (void *)(size_t)pause();
542 * Test threadgroup migration.
543 * All threads of a process are migrated together.
/* See the block comment above: whole-process migration moves all threads. */
545 static int test_cgcore_proc_migration(const char *root)
/* c_threads tracks how many threads were actually created, so cleanup
 * cancels/joins only those even if pthread_create fails partway */
548 int t, c_threads = 0, n_threads = 13;
549 char *src = NULL, *dst = NULL;
550 pthread_t threads[n_threads];
552 src = cg_name(root, "cg_src");
553 dst = cg_name(root, "cg_dst");
562 if (cg_enter_current(src))
565 for (c_threads = 0; c_threads < n_threads; ++c_threads) {
566 if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
/* migrate the whole process; dst must then hold all workers + main thread */
570 cg_enter_current(dst);
571 if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
/* teardown: cancel the paused workers, then join them */
577 for (t = 0; t < c_threads; ++t) {
578 pthread_cancel(threads[t]);
581 for (t = 0; t < c_threads; ++t) {
582 pthread_join(threads[t], NULL);
585 cg_enter_current(root);
/*
 * pthread body: ping-pong this single thread between grps[1] and grps[2]
 * and verify after each hop that /proc/self/task/<tid>/cgroup reports the
 * expected "0::<path>" line.
 * NOTE(review): the 'grps' array presumably comes from 'arg' (cast elided);
 * grps[0] is the root path, so grps[g] + strlen(grps[0]) yields the
 * root-relative cgroup path used in the proc file.
 */
596 static void *migrating_thread_fn(void *arg)
598 int g, i, n_iterations = 1000;
600 char lines[3][PATH_MAX];
/* precompute the expected "0::<relative-path>" line for each target */
602 for (g = 1; g < 3; ++g)
603 snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));
605 for (i = 0; i < n_iterations; ++i) {
/* alternate between grps[1] and grps[2] */
606 cg_enter_current_thread(grps[(i % 2) + 1]);
608 if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
615 * Test single thread migration.
616 * Threaded cgroups allow successful migration of a thread.
/* See the block comment above: single-thread migration in threaded cgroups. */
618 static int test_cgcore_thread_migration(const char *root)
/* grps[0] = root (for relative-path math), [1] = src, [2] = dst */
623 char *grps[3] = { (char *)root, NULL, NULL };
627 dom = cg_name(root, "cg_dom");
628 grps[1] = cg_name(root, "cg_dom/cg_src");
629 grps[2] = cg_name(root, "cg_dom/cg_dst");
630 if (!grps[1] || !grps[2] || !dom)
635 if (cg_create(grps[1]))
637 if (cg_create(grps[2]))
/* both leaves must be threaded for per-thread migration to be legal */
640 if (cg_write(grps[1], "cgroup.type", "threaded"))
642 if (cg_write(grps[2], "cgroup.type", "threaded"))
645 if (cg_enter_current(grps[1]))
/* the worker bounces itself between src and dst (see migrating_thread_fn) */
648 if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
651 if (pthread_join(thr, &retval))
/* the main thread must still be where we left it: in grps[1] */
657 snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
658 if (proc_read_strstr(0, 1, "cgroup", line))
/* cleanup (elided labels) */
664 cg_enter_current(root);
/* T() pairs a test function with its stringified name for reporting. */
677 #define T(x) { x, #x }
679 int (*fn)(const char *root);
/* registry iterated by main(); each entry runs against the v2 root */
682 T(test_cgcore_internal_process_constraint),
683 T(test_cgcore_top_down_constraint_enable),
684 T(test_cgcore_top_down_constraint_disable),
685 T(test_cgcore_no_internal_process_constraint_on_threads),
686 T(test_cgcore_parent_becomes_threaded),
687 T(test_cgcore_invalid_domain),
688 T(test_cgcore_populated),
689 T(test_cgcore_proc_migration),
690 T(test_cgcore_thread_migration),
691 T(test_cgcore_destroy),
695 int main(int argc, char *argv[])
698 int i, ret = EXIT_SUCCESS;
700 if (cg_find_unified_root(root, sizeof(root)))
701 ksft_exit_skip("cgroup v2 isn't mounted\n");
703 if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
704 if (cg_write(root, "cgroup.subtree_control", "+memory"))
705 ksft_exit_skip("Failed to set memory controller\n");
707 for (i = 0; i < ARRAY_SIZE(tests); i++) {
708 switch (tests[i].fn(root)) {
710 ksft_test_result_pass("%s\n", tests[i].name);
713 ksft_test_result_skip("%s\n", tests[i].name);
717 ksft_test_result_fail("%s\n", tests[i].name);