/*
 * Debug controller - exposes cgroup core internals via debug files.
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"
13 static struct cgroup_subsys_state *
14 debug_css_alloc(struct cgroup_subsys_state *parent_css)
16 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
19 return ERR_PTR(-ENOMEM);
/* Free the css allocated by debug_css_alloc(). */
static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}
30 * debug_taskcount_read - return the number of tasks in a cgroup.
31 * @cgrp: the cgroup in question
33 static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
36 return cgroup_task_count(css->cgroup);
39 static int current_css_set_read(struct seq_file *seq, void *v)
41 struct kernfs_open_file *of = seq->private;
43 struct cgroup_subsys *ss;
44 struct cgroup_subsys_state *css;
47 if (!cgroup_kn_lock_live(of->kn, false))
50 spin_lock_irq(&css_set_lock);
52 cset = rcu_dereference(current->cgroups);
53 refcnt = refcount_read(&cset->refcount);
54 seq_printf(seq, "css_set %pK %d", cset, refcnt);
55 if (refcnt > cset->nr_tasks)
56 seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
60 * Print the css'es stored in the current css_set.
62 for_each_subsys(ss, i) {
63 css = cset->subsys[ss->id];
66 seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
67 (unsigned long)css, css->id);
70 spin_unlock_irq(&css_set_lock);
71 cgroup_kn_unlock(of->kn);
75 static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
81 count = refcount_read(&task_css_set(current)->refcount);
86 static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
88 struct cgrp_cset_link *link;
92 name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
96 spin_lock_irq(&css_set_lock);
98 cset = rcu_dereference(current->cgroups);
99 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
100 struct cgroup *c = link->cgrp;
102 cgroup_name(c, name_buf, NAME_MAX + 1);
103 seq_printf(seq, "Root %d group %s\n",
104 c->root->hierarchy_id, name_buf);
107 spin_unlock_irq(&css_set_lock);
112 #define MAX_TASKS_SHOWN_PER_CSS 25
113 static int cgroup_css_links_read(struct seq_file *seq, void *v)
115 struct cgroup_subsys_state *css = seq_css(seq);
116 struct cgrp_cset_link *link;
117 int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;
119 spin_lock_irq(&css_set_lock);
121 list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
122 struct css_set *cset = link->cset;
123 struct task_struct *task;
125 int refcnt = refcount_read(&cset->refcount);
128 * Print out the proc_cset and threaded_cset relationship
129 * and highlight difference between refcount and task_count.
131 seq_printf(seq, "css_set %pK", cset);
132 if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
134 seq_printf(seq, "=>%pK", cset->dom_cset);
136 if (!list_empty(&cset->threaded_csets)) {
137 struct css_set *tcset;
140 list_for_each_entry(tcset, &cset->threaded_csets,
141 threaded_csets_node) {
142 seq_puts(seq, idx ? "," : "<=");
143 seq_printf(seq, "%pK", tcset);
147 seq_printf(seq, " %d", refcnt);
148 if (refcnt - cset->nr_tasks > 0) {
149 int extra = refcnt - cset->nr_tasks;
151 seq_printf(seq, " +%d", extra);
153 * Take out the one additional reference in
156 if (cset == &init_css_set)
163 list_for_each_entry(task, &cset->tasks, cg_list) {
164 if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
165 seq_printf(seq, " task %d\n",
169 list_for_each_entry(task, &cset->mg_tasks, cg_list) {
170 if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
171 seq_printf(seq, " task %d\n",
174 /* show # of overflowed tasks */
175 if (count > MAX_TASKS_SHOWN_PER_CSS)
176 seq_printf(seq, " ... (%d)\n",
177 count - MAX_TASKS_SHOWN_PER_CSS);
180 seq_puts(seq, " [dead]\n");
184 WARN_ON(count != cset->nr_tasks);
186 spin_unlock_irq(&css_set_lock);
188 if (!dead_cnt && !extra_refs && !threaded_csets)
193 seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
195 seq_printf(seq, "extra references = %d\n", extra_refs);
197 seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
202 static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
204 struct kernfs_open_file *of = seq->private;
206 struct cgroup_subsys *ss;
207 struct cgroup_subsys_state *css;
211 cgrp = cgroup_kn_lock_live(of->kn, false);
215 for_each_subsys(ss, i) {
216 css = rcu_dereference_check(cgrp->subsys[ss->id], true);
222 /* Show the parent CSS if applicable*/
224 snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
226 seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
227 (unsigned long)css, css->id,
228 atomic_read(&css->online_cnt), pbuf);
231 cgroup_kn_unlock(of->kn);
235 static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
238 struct cgroup_subsys *ss;
242 seq_printf(seq, "%-17s: ", name);
243 for_each_subsys(ss, ssid) {
244 if (!(mask & (1 << ssid)))
248 seq_puts(seq, ss->name);
254 static int cgroup_masks_read(struct seq_file *seq, void *v)
256 struct kernfs_open_file *of = seq->private;
259 cgrp = cgroup_kn_lock_live(of->kn, false);
263 cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
264 cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);
266 cgroup_kn_unlock(of->kn);
270 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
272 return (!cgroup_is_populated(css->cgroup) &&
273 !css_has_online_children(&css->cgroup->self));
276 static struct cftype debug_legacy_files[] = {
279 .read_u64 = debug_taskcount_read,
283 .name = "current_css_set",
284 .seq_show = current_css_set_read,
285 .flags = CFTYPE_ONLY_ON_ROOT,
289 .name = "current_css_set_refcount",
290 .read_u64 = current_css_set_refcount_read,
291 .flags = CFTYPE_ONLY_ON_ROOT,
295 .name = "current_css_set_cg_links",
296 .seq_show = current_css_set_cg_links_read,
297 .flags = CFTYPE_ONLY_ON_ROOT,
301 .name = "cgroup_css_links",
302 .seq_show = cgroup_css_links_read,
306 .name = "cgroup_subsys_states",
307 .seq_show = cgroup_subsys_states_read,
311 .name = "cgroup_masks",
312 .seq_show = cgroup_masks_read,
316 .name = "releasable",
317 .read_u64 = releasable_read,
323 static struct cftype debug_files[] = {
326 .read_u64 = debug_taskcount_read,
330 .name = "current_css_set",
331 .seq_show = current_css_set_read,
332 .flags = CFTYPE_ONLY_ON_ROOT,
336 .name = "current_css_set_refcount",
337 .read_u64 = current_css_set_refcount_read,
338 .flags = CFTYPE_ONLY_ON_ROOT,
342 .name = "current_css_set_cg_links",
343 .seq_show = current_css_set_cg_links_read,
344 .flags = CFTYPE_ONLY_ON_ROOT,
349 .seq_show = cgroup_css_links_read,
354 .seq_show = cgroup_subsys_states_read,
359 .seq_show = cgroup_masks_read,
365 struct cgroup_subsys debug_cgrp_subsys = {
366 .css_alloc = debug_css_alloc,
367 .css_free = debug_css_free,
368 .legacy_cftypes = debug_legacy_files,
372 * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
375 static int __init enable_cgroup_debug(char *str)
377 debug_cgrp_subsys.dfl_cftypes = debug_files;
378 debug_cgrp_subsys.implicit_on_dfl = true;
379 debug_cgrp_subsys.threaded = true;
382 __setup("cgroup_debug", enable_cgroup_debug);