// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON sysfs Interface
 *
 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
 */
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
14 static DEFINE_MUTEX(damon_sysfs_lock);
/*
 * unsigned long range directory
 */
20 struct damon_sysfs_ul_range {
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
30 struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
35 range->kobj = (struct kobject){};
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
45 struct damon_sysfs_ul_range *range = container_of(kobj,
46 struct damon_sysfs_ul_range, kobj);
48 return sysfs_emit(buf, "%lu\n", range->min);
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 const char *buf, size_t count)
54 struct damon_sysfs_ul_range *range = container_of(kobj,
55 struct damon_sysfs_ul_range, kobj);
59 err = kstrtoul(buf, 0, &min);
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
70 struct damon_sysfs_ul_range *range = container_of(kobj,
71 struct damon_sysfs_ul_range, kobj);
73 return sysfs_emit(buf, "%lu\n", range->max);
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 const char *buf, size_t count)
79 struct damon_sysfs_ul_range *range = container_of(kobj,
80 struct damon_sysfs_ul_range, kobj);
84 err = kstrtoul(buf, 0, &max);
92 static void damon_sysfs_ul_range_release(struct kobject *kobj)
94 kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
97 static struct kobj_attribute damon_sysfs_ul_range_min_attr =
98 __ATTR_RW_MODE(min, 0600);
100 static struct kobj_attribute damon_sysfs_ul_range_max_attr =
101 __ATTR_RW_MODE(max, 0600);
103 static struct attribute *damon_sysfs_ul_range_attrs[] = {
104 &damon_sysfs_ul_range_min_attr.attr,
105 &damon_sysfs_ul_range_max_attr.attr,
108 ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
110 static struct kobj_type damon_sysfs_ul_range_ktype = {
111 .release = damon_sysfs_ul_range_release,
112 .sysfs_ops = &kobj_sysfs_ops,
113 .default_groups = damon_sysfs_ul_range_groups,
/*
 * scheme/weights directory
 */
120 struct damon_sysfs_weights {
123 unsigned int nr_accesses;
127 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
128 unsigned int nr_accesses, unsigned int age)
130 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
135 weights->kobj = (struct kobject){};
137 weights->nr_accesses = nr_accesses;
142 static ssize_t sz_permil_show(struct kobject *kobj,
143 struct kobj_attribute *attr, char *buf)
145 struct damon_sysfs_weights *weights = container_of(kobj,
146 struct damon_sysfs_weights, kobj);
148 return sysfs_emit(buf, "%u\n", weights->sz);
151 static ssize_t sz_permil_store(struct kobject *kobj,
152 struct kobj_attribute *attr, const char *buf, size_t count)
154 struct damon_sysfs_weights *weights = container_of(kobj,
155 struct damon_sysfs_weights, kobj);
156 int err = kstrtouint(buf, 0, &weights->sz);
163 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
164 struct kobj_attribute *attr, char *buf)
166 struct damon_sysfs_weights *weights = container_of(kobj,
167 struct damon_sysfs_weights, kobj);
169 return sysfs_emit(buf, "%u\n", weights->nr_accesses);
172 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
173 struct kobj_attribute *attr, const char *buf, size_t count)
175 struct damon_sysfs_weights *weights = container_of(kobj,
176 struct damon_sysfs_weights, kobj);
177 int err = kstrtouint(buf, 0, &weights->nr_accesses);
184 static ssize_t age_permil_show(struct kobject *kobj,
185 struct kobj_attribute *attr, char *buf)
187 struct damon_sysfs_weights *weights = container_of(kobj,
188 struct damon_sysfs_weights, kobj);
190 return sysfs_emit(buf, "%u\n", weights->age);
193 static ssize_t age_permil_store(struct kobject *kobj,
194 struct kobj_attribute *attr, const char *buf, size_t count)
196 struct damon_sysfs_weights *weights = container_of(kobj,
197 struct damon_sysfs_weights, kobj);
198 int err = kstrtouint(buf, 0, &weights->age);
205 static void damon_sysfs_weights_release(struct kobject *kobj)
207 kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
210 static struct kobj_attribute damon_sysfs_weights_sz_attr =
211 __ATTR_RW_MODE(sz_permil, 0600);
213 static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
214 __ATTR_RW_MODE(nr_accesses_permil, 0600);
216 static struct kobj_attribute damon_sysfs_weights_age_attr =
217 __ATTR_RW_MODE(age_permil, 0600);
219 static struct attribute *damon_sysfs_weights_attrs[] = {
220 &damon_sysfs_weights_sz_attr.attr,
221 &damon_sysfs_weights_nr_accesses_attr.attr,
222 &damon_sysfs_weights_age_attr.attr,
225 ATTRIBUTE_GROUPS(damon_sysfs_weights);
227 static struct kobj_type damon_sysfs_weights_ktype = {
228 .release = damon_sysfs_weights_release,
229 .sysfs_ops = &kobj_sysfs_ops,
230 .default_groups = damon_sysfs_weights_groups,
237 struct damon_sysfs_quotas {
239 struct damon_sysfs_weights *weights;
242 unsigned long reset_interval_ms;
245 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
247 return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
250 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
252 struct damon_sysfs_weights *weights;
255 weights = damon_sysfs_weights_alloc(0, 0, 0);
259 err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
260 "as->kobj, "weights");
262 kobject_put(&weights->kobj);
264 quotas->weights = weights;
268 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
270 kobject_put("as->weights->kobj);
273 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
276 struct damon_sysfs_quotas *quotas = container_of(kobj,
277 struct damon_sysfs_quotas, kobj);
279 return sysfs_emit(buf, "%lu\n", quotas->ms);
282 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
283 const char *buf, size_t count)
285 struct damon_sysfs_quotas *quotas = container_of(kobj,
286 struct damon_sysfs_quotas, kobj);
287 int err = kstrtoul(buf, 0, "as->ms);
294 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
297 struct damon_sysfs_quotas *quotas = container_of(kobj,
298 struct damon_sysfs_quotas, kobj);
300 return sysfs_emit(buf, "%lu\n", quotas->sz);
303 static ssize_t bytes_store(struct kobject *kobj,
304 struct kobj_attribute *attr, const char *buf, size_t count)
306 struct damon_sysfs_quotas *quotas = container_of(kobj,
307 struct damon_sysfs_quotas, kobj);
308 int err = kstrtoul(buf, 0, "as->sz);
315 static ssize_t reset_interval_ms_show(struct kobject *kobj,
316 struct kobj_attribute *attr, char *buf)
318 struct damon_sysfs_quotas *quotas = container_of(kobj,
319 struct damon_sysfs_quotas, kobj);
321 return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
324 static ssize_t reset_interval_ms_store(struct kobject *kobj,
325 struct kobj_attribute *attr, const char *buf, size_t count)
327 struct damon_sysfs_quotas *quotas = container_of(kobj,
328 struct damon_sysfs_quotas, kobj);
329 int err = kstrtoul(buf, 0, "as->reset_interval_ms);
336 static void damon_sysfs_quotas_release(struct kobject *kobj)
338 kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
341 static struct kobj_attribute damon_sysfs_quotas_ms_attr =
342 __ATTR_RW_MODE(ms, 0600);
344 static struct kobj_attribute damon_sysfs_quotas_sz_attr =
345 __ATTR_RW_MODE(bytes, 0600);
347 static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
348 __ATTR_RW_MODE(reset_interval_ms, 0600);
350 static struct attribute *damon_sysfs_quotas_attrs[] = {
351 &damon_sysfs_quotas_ms_attr.attr,
352 &damon_sysfs_quotas_sz_attr.attr,
353 &damon_sysfs_quotas_reset_interval_ms_attr.attr,
356 ATTRIBUTE_GROUPS(damon_sysfs_quotas);
358 static struct kobj_type damon_sysfs_quotas_ktype = {
359 .release = damon_sysfs_quotas_release,
360 .sysfs_ops = &kobj_sysfs_ops,
361 .default_groups = damon_sysfs_quotas_groups,
/*
 * access_pattern directory
 */
368 struct damon_sysfs_access_pattern {
370 struct damon_sysfs_ul_range *sz;
371 struct damon_sysfs_ul_range *nr_accesses;
372 struct damon_sysfs_ul_range *age;
376 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
378 struct damon_sysfs_access_pattern *access_pattern =
379 kmalloc(sizeof(*access_pattern), GFP_KERNEL);
383 access_pattern->kobj = (struct kobject){};
384 return access_pattern;
387 static int damon_sysfs_access_pattern_add_range_dir(
388 struct damon_sysfs_access_pattern *access_pattern,
389 struct damon_sysfs_ul_range **range_dir_ptr,
392 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
397 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
398 &access_pattern->kobj, name);
400 kobject_put(&range->kobj);
402 *range_dir_ptr = range;
406 static int damon_sysfs_access_pattern_add_dirs(
407 struct damon_sysfs_access_pattern *access_pattern)
411 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
412 &access_pattern->sz, "sz");
416 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
417 &access_pattern->nr_accesses, "nr_accesses");
419 goto put_nr_accesses_sz_out;
421 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
422 &access_pattern->age, "age");
424 goto put_age_nr_accesses_sz_out;
427 put_age_nr_accesses_sz_out:
428 kobject_put(&access_pattern->age->kobj);
429 access_pattern->age = NULL;
430 put_nr_accesses_sz_out:
431 kobject_put(&access_pattern->nr_accesses->kobj);
432 access_pattern->nr_accesses = NULL;
434 kobject_put(&access_pattern->sz->kobj);
435 access_pattern->sz = NULL;
439 static void damon_sysfs_access_pattern_rm_dirs(
440 struct damon_sysfs_access_pattern *access_pattern)
442 kobject_put(&access_pattern->sz->kobj);
443 kobject_put(&access_pattern->nr_accesses->kobj);
444 kobject_put(&access_pattern->age->kobj);
447 static void damon_sysfs_access_pattern_release(struct kobject *kobj)
449 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
452 static struct attribute *damon_sysfs_access_pattern_attrs[] = {
455 ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
457 static struct kobj_type damon_sysfs_access_pattern_ktype = {
458 .release = damon_sysfs_access_pattern_release,
459 .sysfs_ops = &kobj_sysfs_ops,
460 .default_groups = damon_sysfs_access_pattern_groups,
467 struct damon_sysfs_scheme {
469 enum damos_action action;
470 struct damon_sysfs_access_pattern *access_pattern;
471 struct damon_sysfs_quotas *quotas;
474 /* This should match with enum damos_action */
475 static const char * const damon_sysfs_damos_action_strs[] = {
484 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
485 enum damos_action action)
487 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
492 scheme->kobj = (struct kobject){};
493 scheme->action = action;
497 static int damon_sysfs_scheme_set_access_pattern(
498 struct damon_sysfs_scheme *scheme)
500 struct damon_sysfs_access_pattern *access_pattern;
503 access_pattern = damon_sysfs_access_pattern_alloc();
506 err = kobject_init_and_add(&access_pattern->kobj,
507 &damon_sysfs_access_pattern_ktype, &scheme->kobj,
511 err = damon_sysfs_access_pattern_add_dirs(access_pattern);
514 scheme->access_pattern = access_pattern;
518 kobject_put(&access_pattern->kobj);
522 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
524 struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
529 err = kobject_init_and_add("as->kobj, &damon_sysfs_quotas_ktype,
530 &scheme->kobj, "quotas");
533 err = damon_sysfs_quotas_add_dirs(quotas);
536 scheme->quotas = quotas;
540 kobject_put("as->kobj);
544 static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
548 err = damon_sysfs_scheme_set_access_pattern(scheme);
551 err = damon_sysfs_scheme_set_quotas(scheme);
553 goto put_access_pattern_out;
556 put_access_pattern_out:
557 kobject_put(&scheme->access_pattern->kobj);
558 scheme->access_pattern = NULL;
562 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
564 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
565 kobject_put(&scheme->access_pattern->kobj);
566 damon_sysfs_quotas_rm_dirs(scheme->quotas);
567 kobject_put(&scheme->quotas->kobj);
570 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
573 struct damon_sysfs_scheme *scheme = container_of(kobj,
574 struct damon_sysfs_scheme, kobj);
576 return sysfs_emit(buf, "%s\n",
577 damon_sysfs_damos_action_strs[scheme->action]);
580 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
581 const char *buf, size_t count)
583 struct damon_sysfs_scheme *scheme = container_of(kobj,
584 struct damon_sysfs_scheme, kobj);
585 enum damos_action action;
587 for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
588 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
589 scheme->action = action;
596 static void damon_sysfs_scheme_release(struct kobject *kobj)
598 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
601 static struct kobj_attribute damon_sysfs_scheme_action_attr =
602 __ATTR_RW_MODE(action, 0600);
604 static struct attribute *damon_sysfs_scheme_attrs[] = {
605 &damon_sysfs_scheme_action_attr.attr,
608 ATTRIBUTE_GROUPS(damon_sysfs_scheme);
610 static struct kobj_type damon_sysfs_scheme_ktype = {
611 .release = damon_sysfs_scheme_release,
612 .sysfs_ops = &kobj_sysfs_ops,
613 .default_groups = damon_sysfs_scheme_groups,
620 struct damon_sysfs_schemes {
622 struct damon_sysfs_scheme **schemes_arr;
626 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
628 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
631 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
633 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
636 for (i = 0; i < schemes->nr; i++) {
637 damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
638 kobject_put(&schemes_arr[i]->kobj);
642 schemes->schemes_arr = NULL;
645 static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
648 struct damon_sysfs_scheme **schemes_arr, *scheme;
651 damon_sysfs_schemes_rm_dirs(schemes);
655 schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
656 GFP_KERNEL | __GFP_NOWARN);
659 schemes->schemes_arr = schemes_arr;
661 for (i = 0; i < nr_schemes; i++) {
662 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
664 damon_sysfs_schemes_rm_dirs(schemes);
668 err = kobject_init_and_add(&scheme->kobj,
669 &damon_sysfs_scheme_ktype, &schemes->kobj,
673 err = damon_sysfs_scheme_add_dirs(scheme);
677 schemes_arr[i] = scheme;
683 damon_sysfs_schemes_rm_dirs(schemes);
684 kobject_put(&scheme->kobj);
688 static ssize_t nr_schemes_show(struct kobject *kobj,
689 struct kobj_attribute *attr, char *buf)
691 struct damon_sysfs_schemes *schemes = container_of(kobj,
692 struct damon_sysfs_schemes, kobj);
694 return sysfs_emit(buf, "%d\n", schemes->nr);
697 static ssize_t nr_schemes_store(struct kobject *kobj,
698 struct kobj_attribute *attr, const char *buf, size_t count)
700 struct damon_sysfs_schemes *schemes = container_of(kobj,
701 struct damon_sysfs_schemes, kobj);
702 int nr, err = kstrtoint(buf, 0, &nr);
709 if (!mutex_trylock(&damon_sysfs_lock))
711 err = damon_sysfs_schemes_add_dirs(schemes, nr);
712 mutex_unlock(&damon_sysfs_lock);
718 static void damon_sysfs_schemes_release(struct kobject *kobj)
720 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
723 static struct kobj_attribute damon_sysfs_schemes_nr_attr =
724 __ATTR_RW_MODE(nr_schemes, 0600);
726 static struct attribute *damon_sysfs_schemes_attrs[] = {
727 &damon_sysfs_schemes_nr_attr.attr,
730 ATTRIBUTE_GROUPS(damon_sysfs_schemes);
732 static struct kobj_type damon_sysfs_schemes_ktype = {
733 .release = damon_sysfs_schemes_release,
734 .sysfs_ops = &kobj_sysfs_ops,
735 .default_groups = damon_sysfs_schemes_groups,
/*
 * init region directory
 */
742 struct damon_sysfs_region {
748 static struct damon_sysfs_region *damon_sysfs_region_alloc(
752 struct damon_sysfs_region *region = kmalloc(sizeof(*region),
757 region->kobj = (struct kobject){};
758 region->start = start;
763 static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
766 struct damon_sysfs_region *region = container_of(kobj,
767 struct damon_sysfs_region, kobj);
769 return sysfs_emit(buf, "%lu\n", region->start);
772 static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
773 const char *buf, size_t count)
775 struct damon_sysfs_region *region = container_of(kobj,
776 struct damon_sysfs_region, kobj);
777 int err = kstrtoul(buf, 0, ®ion->start);
784 static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
787 struct damon_sysfs_region *region = container_of(kobj,
788 struct damon_sysfs_region, kobj);
790 return sysfs_emit(buf, "%lu\n", region->end);
793 static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
794 const char *buf, size_t count)
796 struct damon_sysfs_region *region = container_of(kobj,
797 struct damon_sysfs_region, kobj);
798 int err = kstrtoul(buf, 0, ®ion->end);
805 static void damon_sysfs_region_release(struct kobject *kobj)
807 kfree(container_of(kobj, struct damon_sysfs_region, kobj));
810 static struct kobj_attribute damon_sysfs_region_start_attr =
811 __ATTR_RW_MODE(start, 0600);
813 static struct kobj_attribute damon_sysfs_region_end_attr =
814 __ATTR_RW_MODE(end, 0600);
816 static struct attribute *damon_sysfs_region_attrs[] = {
817 &damon_sysfs_region_start_attr.attr,
818 &damon_sysfs_region_end_attr.attr,
821 ATTRIBUTE_GROUPS(damon_sysfs_region);
823 static struct kobj_type damon_sysfs_region_ktype = {
824 .release = damon_sysfs_region_release,
825 .sysfs_ops = &kobj_sysfs_ops,
826 .default_groups = damon_sysfs_region_groups,
/*
 * init_regions directory
 */
833 struct damon_sysfs_regions {
835 struct damon_sysfs_region **regions_arr;
839 static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
841 return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
844 static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
846 struct damon_sysfs_region **regions_arr = regions->regions_arr;
849 for (i = 0; i < regions->nr; i++)
850 kobject_put(®ions_arr[i]->kobj);
853 regions->regions_arr = NULL;
856 static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
859 struct damon_sysfs_region **regions_arr, *region;
862 damon_sysfs_regions_rm_dirs(regions);
866 regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
867 GFP_KERNEL | __GFP_NOWARN);
870 regions->regions_arr = regions_arr;
872 for (i = 0; i < nr_regions; i++) {
873 region = damon_sysfs_region_alloc(0, 0);
875 damon_sysfs_regions_rm_dirs(regions);
879 err = kobject_init_and_add(®ion->kobj,
880 &damon_sysfs_region_ktype, ®ions->kobj,
883 kobject_put(®ion->kobj);
884 damon_sysfs_regions_rm_dirs(regions);
888 regions_arr[i] = region;
894 static ssize_t nr_regions_show(struct kobject *kobj,
895 struct kobj_attribute *attr, char *buf)
897 struct damon_sysfs_regions *regions = container_of(kobj,
898 struct damon_sysfs_regions, kobj);
900 return sysfs_emit(buf, "%d\n", regions->nr);
903 static ssize_t nr_regions_store(struct kobject *kobj,
904 struct kobj_attribute *attr, const char *buf, size_t count)
906 struct damon_sysfs_regions *regions = container_of(kobj,
907 struct damon_sysfs_regions, kobj);
908 int nr, err = kstrtoint(buf, 0, &nr);
915 if (!mutex_trylock(&damon_sysfs_lock))
917 err = damon_sysfs_regions_add_dirs(regions, nr);
918 mutex_unlock(&damon_sysfs_lock);
925 static void damon_sysfs_regions_release(struct kobject *kobj)
927 kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
930 static struct kobj_attribute damon_sysfs_regions_nr_attr =
931 __ATTR_RW_MODE(nr_regions, 0600);
933 static struct attribute *damon_sysfs_regions_attrs[] = {
934 &damon_sysfs_regions_nr_attr.attr,
937 ATTRIBUTE_GROUPS(damon_sysfs_regions);
939 static struct kobj_type damon_sysfs_regions_ktype = {
940 .release = damon_sysfs_regions_release,
941 .sysfs_ops = &kobj_sysfs_ops,
942 .default_groups = damon_sysfs_regions_groups,
949 struct damon_sysfs_target {
951 struct damon_sysfs_regions *regions;
955 static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
957 return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
960 static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
962 struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
968 err = kobject_init_and_add(®ions->kobj, &damon_sysfs_regions_ktype,
969 &target->kobj, "regions");
971 kobject_put(®ions->kobj);
973 target->regions = regions;
977 static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
979 damon_sysfs_regions_rm_dirs(target->regions);
980 kobject_put(&target->regions->kobj);
983 static ssize_t pid_target_show(struct kobject *kobj,
984 struct kobj_attribute *attr, char *buf)
986 struct damon_sysfs_target *target = container_of(kobj,
987 struct damon_sysfs_target, kobj);
989 return sysfs_emit(buf, "%d\n", target->pid);
992 static ssize_t pid_target_store(struct kobject *kobj,
993 struct kobj_attribute *attr, const char *buf, size_t count)
995 struct damon_sysfs_target *target = container_of(kobj,
996 struct damon_sysfs_target, kobj);
997 int err = kstrtoint(buf, 0, &target->pid);
1004 static void damon_sysfs_target_release(struct kobject *kobj)
1006 kfree(container_of(kobj, struct damon_sysfs_target, kobj));
1009 static struct kobj_attribute damon_sysfs_target_pid_attr =
1010 __ATTR_RW_MODE(pid_target, 0600);
1012 static struct attribute *damon_sysfs_target_attrs[] = {
1013 &damon_sysfs_target_pid_attr.attr,
1016 ATTRIBUTE_GROUPS(damon_sysfs_target);
1018 static struct kobj_type damon_sysfs_target_ktype = {
1019 .release = damon_sysfs_target_release,
1020 .sysfs_ops = &kobj_sysfs_ops,
1021 .default_groups = damon_sysfs_target_groups,
1028 struct damon_sysfs_targets {
1029 struct kobject kobj;
1030 struct damon_sysfs_target **targets_arr;
1034 static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1036 return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1039 static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
1041 struct damon_sysfs_target **targets_arr = targets->targets_arr;
1044 for (i = 0; i < targets->nr; i++) {
1045 damon_sysfs_target_rm_dirs(targets_arr[i]);
1046 kobject_put(&targets_arr[i]->kobj);
1050 targets->targets_arr = NULL;
1053 static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
1056 struct damon_sysfs_target **targets_arr, *target;
1059 damon_sysfs_targets_rm_dirs(targets);
1063 targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
1064 GFP_KERNEL | __GFP_NOWARN);
1067 targets->targets_arr = targets_arr;
1069 for (i = 0; i < nr_targets; i++) {
1070 target = damon_sysfs_target_alloc();
1072 damon_sysfs_targets_rm_dirs(targets);
1076 err = kobject_init_and_add(&target->kobj,
1077 &damon_sysfs_target_ktype, &targets->kobj,
1082 err = damon_sysfs_target_add_dirs(target);
1086 targets_arr[i] = target;
1092 damon_sysfs_targets_rm_dirs(targets);
1093 kobject_put(&target->kobj);
1097 static ssize_t nr_targets_show(struct kobject *kobj,
1098 struct kobj_attribute *attr, char *buf)
1100 struct damon_sysfs_targets *targets = container_of(kobj,
1101 struct damon_sysfs_targets, kobj);
1103 return sysfs_emit(buf, "%d\n", targets->nr);
1106 static ssize_t nr_targets_store(struct kobject *kobj,
1107 struct kobj_attribute *attr, const char *buf, size_t count)
1109 struct damon_sysfs_targets *targets = container_of(kobj,
1110 struct damon_sysfs_targets, kobj);
1111 int nr, err = kstrtoint(buf, 0, &nr);
1118 if (!mutex_trylock(&damon_sysfs_lock))
1120 err = damon_sysfs_targets_add_dirs(targets, nr);
1121 mutex_unlock(&damon_sysfs_lock);
1128 static void damon_sysfs_targets_release(struct kobject *kobj)
1130 kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
1133 static struct kobj_attribute damon_sysfs_targets_nr_attr =
1134 __ATTR_RW_MODE(nr_targets, 0600);
1136 static struct attribute *damon_sysfs_targets_attrs[] = {
1137 &damon_sysfs_targets_nr_attr.attr,
1140 ATTRIBUTE_GROUPS(damon_sysfs_targets);
1142 static struct kobj_type damon_sysfs_targets_ktype = {
1143 .release = damon_sysfs_targets_release,
1144 .sysfs_ops = &kobj_sysfs_ops,
1145 .default_groups = damon_sysfs_targets_groups,
/*
 * intervals directory
 */
1152 struct damon_sysfs_intervals {
1153 struct kobject kobj;
1154 unsigned long sample_us;
1155 unsigned long aggr_us;
1156 unsigned long update_us;
1159 static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1160 unsigned long sample_us, unsigned long aggr_us,
1161 unsigned long update_us)
1163 struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1169 intervals->kobj = (struct kobject){};
1170 intervals->sample_us = sample_us;
1171 intervals->aggr_us = aggr_us;
1172 intervals->update_us = update_us;
1176 static ssize_t sample_us_show(struct kobject *kobj,
1177 struct kobj_attribute *attr, char *buf)
1179 struct damon_sysfs_intervals *intervals = container_of(kobj,
1180 struct damon_sysfs_intervals, kobj);
1182 return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1185 static ssize_t sample_us_store(struct kobject *kobj,
1186 struct kobj_attribute *attr, const char *buf, size_t count)
1188 struct damon_sysfs_intervals *intervals = container_of(kobj,
1189 struct damon_sysfs_intervals, kobj);
1191 int err = kstrtoul(buf, 0, &us);
1196 intervals->sample_us = us;
1200 static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1203 struct damon_sysfs_intervals *intervals = container_of(kobj,
1204 struct damon_sysfs_intervals, kobj);
1206 return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1209 static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1210 const char *buf, size_t count)
1212 struct damon_sysfs_intervals *intervals = container_of(kobj,
1213 struct damon_sysfs_intervals, kobj);
1215 int err = kstrtoul(buf, 0, &us);
1220 intervals->aggr_us = us;
1224 static ssize_t update_us_show(struct kobject *kobj,
1225 struct kobj_attribute *attr, char *buf)
1227 struct damon_sysfs_intervals *intervals = container_of(kobj,
1228 struct damon_sysfs_intervals, kobj);
1230 return sysfs_emit(buf, "%lu\n", intervals->update_us);
1233 static ssize_t update_us_store(struct kobject *kobj,
1234 struct kobj_attribute *attr, const char *buf, size_t count)
1236 struct damon_sysfs_intervals *intervals = container_of(kobj,
1237 struct damon_sysfs_intervals, kobj);
1239 int err = kstrtoul(buf, 0, &us);
1244 intervals->update_us = us;
1248 static void damon_sysfs_intervals_release(struct kobject *kobj)
1250 kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
1253 static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
1254 __ATTR_RW_MODE(sample_us, 0600);
1256 static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
1257 __ATTR_RW_MODE(aggr_us, 0600);
1259 static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
1260 __ATTR_RW_MODE(update_us, 0600);
1262 static struct attribute *damon_sysfs_intervals_attrs[] = {
1263 &damon_sysfs_intervals_sample_us_attr.attr,
1264 &damon_sysfs_intervals_aggr_us_attr.attr,
1265 &damon_sysfs_intervals_update_us_attr.attr,
1268 ATTRIBUTE_GROUPS(damon_sysfs_intervals);
1270 static struct kobj_type damon_sysfs_intervals_ktype = {
1271 .release = damon_sysfs_intervals_release,
1272 .sysfs_ops = &kobj_sysfs_ops,
1273 .default_groups = damon_sysfs_intervals_groups,
/*
 * monitoring_attrs directory
 */
1280 struct damon_sysfs_attrs {
1281 struct kobject kobj;
1282 struct damon_sysfs_intervals *intervals;
1283 struct damon_sysfs_ul_range *nr_regions_range;
1286 static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1288 struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1292 attrs->kobj = (struct kobject){};
1296 static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
1298 struct damon_sysfs_intervals *intervals;
1299 struct damon_sysfs_ul_range *nr_regions_range;
1302 intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
1306 err = kobject_init_and_add(&intervals->kobj,
1307 &damon_sysfs_intervals_ktype, &attrs->kobj,
1310 goto put_intervals_out;
1311 attrs->intervals = intervals;
1313 nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
1314 if (!nr_regions_range) {
1316 goto put_intervals_out;
1319 err = kobject_init_and_add(&nr_regions_range->kobj,
1320 &damon_sysfs_ul_range_ktype, &attrs->kobj,
1323 goto put_nr_regions_intervals_out;
1324 attrs->nr_regions_range = nr_regions_range;
1327 put_nr_regions_intervals_out:
1328 kobject_put(&nr_regions_range->kobj);
1329 attrs->nr_regions_range = NULL;
1331 kobject_put(&intervals->kobj);
1332 attrs->intervals = NULL;
1336 static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
1338 kobject_put(&attrs->nr_regions_range->kobj);
1339 kobject_put(&attrs->intervals->kobj);
1342 static void damon_sysfs_attrs_release(struct kobject *kobj)
1344 kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
1347 static struct attribute *damon_sysfs_attrs_attrs[] = {
1350 ATTRIBUTE_GROUPS(damon_sysfs_attrs);
1352 static struct kobj_type damon_sysfs_attrs_ktype = {
1353 .release = damon_sysfs_attrs_release,
1354 .sysfs_ops = &kobj_sysfs_ops,
1355 .default_groups = damon_sysfs_attrs_groups,
1362 /* This should match with enum damon_ops_id */
1363 static const char * const damon_sysfs_ops_strs[] = {
1368 struct damon_sysfs_context {
1369 struct kobject kobj;
1370 enum damon_ops_id ops_id;
1371 struct damon_sysfs_attrs *attrs;
1372 struct damon_sysfs_targets *targets;
1373 struct damon_sysfs_schemes *schemes;
1376 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1377 enum damon_ops_id ops_id)
1379 struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1384 context->kobj = (struct kobject){};
1385 context->ops_id = ops_id;
1389 static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
1391 struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
1396 err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
1397 &context->kobj, "monitoring_attrs");
1400 err = damon_sysfs_attrs_add_dirs(attrs);
1403 context->attrs = attrs;
1407 kobject_put(&attrs->kobj);
1411 static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
1413 struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
1418 err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
1419 &context->kobj, "targets");
1421 kobject_put(&targets->kobj);
1424 context->targets = targets;
1428 static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
1430 struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
1435 err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
1436 &context->kobj, "schemes");
1438 kobject_put(&schemes->kobj);
1441 context->schemes = schemes;
1445 static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
1449 err = damon_sysfs_context_set_attrs(context);
1453 err = damon_sysfs_context_set_targets(context);
1457 err = damon_sysfs_context_set_schemes(context);
1459 goto put_targets_attrs_out;
1462 put_targets_attrs_out:
1463 kobject_put(&context->targets->kobj);
1464 context->targets = NULL;
1466 kobject_put(&context->attrs->kobj);
1467 context->attrs = NULL;
1471 static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
1473 damon_sysfs_attrs_rm_dirs(context->attrs);
1474 kobject_put(&context->attrs->kobj);
1475 damon_sysfs_targets_rm_dirs(context->targets);
1476 kobject_put(&context->targets->kobj);
1477 damon_sysfs_schemes_rm_dirs(context->schemes);
1478 kobject_put(&context->schemes->kobj);
/* Show the currently selected monitoring operations set (e.g. "vaddr"). */
1481 static ssize_t operations_show(struct kobject *kobj,
1482 struct kobj_attribute *attr, char *buf)
1484 struct damon_sysfs_context *context = container_of(kobj,
1485 struct damon_sysfs_context, kobj);
1487 return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
/*
 * Select the monitoring operations set by name.  Scans the known ops
 * strings for a match.  NOTE(review): the 'return count' on match and
 * the no-match error return are elided in this extract.
 */
1490 static ssize_t operations_store(struct kobject *kobj,
1491 struct kobj_attribute *attr, const char *buf, size_t count)
1493 struct damon_sysfs_context *context = container_of(kobj,
1494 struct damon_sysfs_context, kobj);
1495 enum damon_ops_id id;
1497 for (id = 0; id < NR_DAMON_OPS; id++) {
1498 if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1499 context->ops_id = id;
/* kobject release callback: frees the wrapper struct. */
1506 static void damon_sysfs_context_release(struct kobject *kobj)
1508 kfree(container_of(kobj, struct damon_sysfs_context, kobj));
/* sysfs attribute and ktype wiring for a context directory. */
1511 static struct kobj_attribute damon_sysfs_context_operations_attr =
1512 __ATTR_RW_MODE(operations, 0600);
1514 static struct attribute *damon_sysfs_context_attrs[] = {
1515 &damon_sysfs_context_operations_attr.attr,
1518 ATTRIBUTE_GROUPS(damon_sysfs_context);
1520 static struct kobj_type damon_sysfs_context_ktype = {
1521 .release = damon_sysfs_context_release,
1522 .sysfs_ops = &kobj_sysfs_ops,
1523 .default_groups = damon_sysfs_context_groups,
1527 * contexts directory
/*
 * Container for the per-kdamond contexts directory; holds an array of
 * child context objects.  NOTE(review): an 'int nr' count field used
 * below appears elided from this extract of the struct.
 */
1530 struct damon_sysfs_contexts {
1531 struct kobject kobj;
1532 struct damon_sysfs_context **contexts_arr;
/* Zero-initialized allocation; kobject is initialized later by caller. */
1536 static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1538 return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
/*
 * Remove and drop every child context directory, then free the array.
 * Safe to call repeatedly: the array pointer is reset to NULL.
 */
1541 static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
1543 struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
1546 for (i = 0; i < contexts->nr; i++) {
1547 damon_sysfs_context_rm_dirs(contexts_arr[i]);
1548 kobject_put(&contexts_arr[i]->kobj);
1551 kfree(contexts_arr);
1552 contexts->contexts_arr = NULL;
/*
 * Replace the child context directories with @nr_contexts fresh ones,
 * each defaulting to the virtual-address-space ops (DAMON_OPS_VADDR).
 * Existing children are torn down first.  NOTE(review): allocation
 * failure checks and the directory-name argument to
 * kobject_init_and_add() are elided in this extract.
 */
1555 static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
1558 struct damon_sysfs_context **contexts_arr, *context;
1561 damon_sysfs_contexts_rm_dirs(contexts);
/* __GFP_NOWARN: user-controlled count, don't spam dmesg on failure */
1565 contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
1566 GFP_KERNEL | __GFP_NOWARN);
1569 contexts->contexts_arr = contexts_arr;
1571 for (i = 0; i < nr_contexts; i++) {
1572 context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
1574 damon_sysfs_contexts_rm_dirs(contexts);
1578 err = kobject_init_and_add(&context->kobj,
1579 &damon_sysfs_context_ktype, &contexts->kobj,
1584 err = damon_sysfs_context_add_dirs(context);
1588 contexts_arr[i] = context;
/* error unwind: remove already-added children, drop the failing one */
1594 damon_sysfs_contexts_rm_dirs(contexts);
1595 kobject_put(&context->kobj);
/* Show the number of child context directories. */
1599 static ssize_t nr_contexts_show(struct kobject *kobj,
1600 struct kobj_attribute *attr, char *buf)
1602 struct damon_sysfs_contexts *contexts = container_of(kobj,
1603 struct damon_sysfs_contexts, kobj);
1605 return sysfs_emit(buf, "%d\n", contexts->nr);
/*
 * Set the number of child contexts (currently only 0 or 1 accepted).
 * Uses trylock so a concurrent writer gets -EBUSY (presumably; the
 * return-value lines are elided in this extract) rather than blocking.
 */
1608 static ssize_t nr_contexts_store(struct kobject *kobj,
1609 struct kobj_attribute *attr, const char *buf, size_t count)
1611 struct damon_sysfs_contexts *contexts = container_of(kobj,
1612 struct damon_sysfs_contexts, kobj);
1615 err = kstrtoint(buf, 0, &nr);
1618 /* TODO: support multiple contexts per kdamond */
1619 if (nr < 0 || 1 < nr)
1622 if (!mutex_trylock(&damon_sysfs_lock))
1624 err = damon_sysfs_contexts_add_dirs(contexts, nr);
1625 mutex_unlock(&damon_sysfs_lock);
/* kobject release callback: frees the contexts container. */
1632 static void damon_sysfs_contexts_release(struct kobject *kobj)
1634 kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
/* sysfs attribute and ktype wiring for the contexts directory. */
1637 static struct kobj_attribute damon_sysfs_contexts_nr_attr
1638 = __ATTR_RW_MODE(nr_contexts, 0600);
1640 static struct attribute *damon_sysfs_contexts_attrs[] = {
1641 &damon_sysfs_contexts_nr_attr.attr,
1644 ATTRIBUTE_GROUPS(damon_sysfs_contexts);
1646 static struct kobj_type damon_sysfs_contexts_ktype = {
1647 .release = damon_sysfs_contexts_release,
1648 .sysfs_ops = &kobj_sysfs_ops,
1649 .default_groups = damon_sysfs_contexts_groups,
/*
 * One kdamond (DAMON worker thread) directory.  @damon_ctx is the live
 * or last-run DAMON context; kept after stop so users can still read
 * final results.
 */
1656 struct damon_sysfs_kdamond {
1657 struct kobject kobj;
1658 struct damon_sysfs_contexts *contexts;
1659 struct damon_ctx *damon_ctx;
/* Zero-initialized allocation; kobject initialized later by caller. */
1662 static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
1664 return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
/*
 * Create the 'contexts' child directory for @kdamond.  NOTE(review):
 * the error checks and directory-name argument are elided in this
 * extract; kobject_put() is presumably the failure path.
 */
1667 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
1669 struct damon_sysfs_contexts *contexts;
1672 contexts = damon_sysfs_contexts_alloc();
1676 err = kobject_init_and_add(&contexts->kobj,
1677 &damon_sysfs_contexts_ktype, &kdamond->kobj,
1680 kobject_put(&contexts->kobj);
1683 kdamond->contexts = contexts;
/* Tear down the 'contexts' child directory and drop its reference. */
1688 static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
1690 damon_sysfs_contexts_rm_dirs(kdamond->contexts);
1691 kobject_put(&kdamond->contexts->kobj);
/*
 * Return whether @ctx's kdamond thread is currently running.
 * ctx->kdamond is read under kdamond_lock, as DAMON core requires.
 */
1694 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
1698 mutex_lock(&ctx->kdamond_lock);
1699 running = ctx->kdamond != NULL;
1700 mutex_unlock(&ctx->kdamond_lock);
/*
 * Show "on" while the kdamond runs, "off" otherwise.  NOTE(review):
 * the NULL-ctx early exit is elided in this extract.
 */
1704 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
1707 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1708 struct damon_sysfs_kdamond, kobj);
1709 struct damon_ctx *ctx = kdamond->damon_ctx;
1715 running = damon_sysfs_ctx_running(ctx);
1717 return sysfs_emit(buf, "%s\n", running ? "on" : "off");
/*
 * Push the user-specified monitoring attributes (sampling/aggregation/
 * update intervals and min/max number of regions) into the DAMON core.
 */
1720 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
1721 struct damon_sysfs_attrs *sys_attrs)
1723 struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
1724 struct damon_sysfs_ul_range *sys_nr_regions =
1725 sys_attrs->nr_regions_range;
1727 return damon_set_attrs(ctx, sys_intervals->sample_us,
1728 sys_intervals->aggr_us, sys_intervals->update_us,
1729 sys_nr_regions->min, sys_nr_regions->max);
/*
 * Destroy all monitoring targets of @ctx.  For the vaddr ops, a pid
 * reference is presumably dropped on the elided line before destroy —
 * confirm against the full file.
 */
1732 static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
1734 struct damon_target *t, *next;
1736 damon_for_each_target_safe(t, next, ctx) {
1737 if (ctx->ops.id == DAMON_OPS_VADDR)
1739 damon_destroy_target(t);
/*
 * Install user-specified initial monitoring regions on target @t.
 * Regions must be well-formed (start <= end) and, because they are
 * appended in order, must not overlap the previous one; a violating
 * region is destroyed again.  NOTE(review): the -EINVAL returns for
 * both violations are elided in this extract.
 */
1743 static int damon_sysfs_set_regions(struct damon_target *t,
1744 struct damon_sysfs_regions *sysfs_regions)
1748 for (i = 0; i < sysfs_regions->nr; i++) {
1749 struct damon_sysfs_region *sys_region =
1750 sysfs_regions->regions_arr[i];
1751 struct damon_region *prev, *r;
1753 if (sys_region->start > sys_region->end)
1755 r = damon_new_region(sys_region->start, sys_region->end);
1758 damon_add_region(r, t);
1759 if (damon_nr_regions(t) > 1) {
1760 prev = damon_prev_region(r);
/* overlap with the previously added region: reject */
1761 if (prev->ar.end > r->ar.start) {
1762 damon_destroy_region(r, t);
/*
 * Create DAMON targets from the sysfs target descriptions.  For vaddr
 * ops each target pins the pid given by the user.  On any failure, all
 * targets built so far are destroyed.  NOTE(review): the allocation /
 * find_get_pid failure checks are elided in this extract.
 */
1770 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
1771 struct damon_sysfs_targets *sysfs_targets)
1775 for (i = 0; i < sysfs_targets->nr; i++) {
1776 struct damon_sysfs_target *sys_target =
1777 sysfs_targets->targets_arr[i];
1778 struct damon_target *t = damon_new_target();
1781 damon_sysfs_destroy_targets(ctx);
1784 if (ctx->ops.id == DAMON_OPS_VADDR) {
/* takes a reference on the pid; released when targets are destroyed */
1785 t->pid = find_get_pid(sys_target->pid);
1787 damon_sysfs_destroy_targets(ctx);
1791 damon_add_target(ctx, t);
1792 err = damon_sysfs_set_regions(t, sys_target->regions);
1794 damon_sysfs_destroy_targets(ctx);
/*
 * Build a DAMOS scheme from its sysfs description: access pattern
 * ranges (size, nr_accesses, age), action, quotas with prioritization
 * weights, and watermarks.  Watermarks default to DAMOS_WMARK_NONE,
 * i.e. always active (remaining wmark fields elided in this extract).
 */
1801 static struct damos *damon_sysfs_mk_scheme(
1802 struct damon_sysfs_scheme *sysfs_scheme)
1804 struct damon_sysfs_access_pattern *pattern =
1805 sysfs_scheme->access_pattern;
1806 struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
1807 struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
1808 struct damos_quota quota = {
1809 .ms = sysfs_quotas->ms,
1810 .sz = sysfs_quotas->sz,
1811 .reset_interval = sysfs_quotas->reset_interval_ms,
1812 .weight_sz = sysfs_weights->sz,
1813 .weight_nr_accesses = sysfs_weights->nr_accesses,
1814 .weight_age = sysfs_weights->age,
1816 struct damos_watermarks wmarks = {
1817 .metric = DAMOS_WMARK_NONE,
1824 return damon_new_scheme(pattern->sz->min, pattern->sz->max,
1825 pattern->nr_accesses->min, pattern->nr_accesses->max,
1826 pattern->age->min, pattern->age->max,
1827 sysfs_scheme->action, &quota, &wmarks);
/*
 * Install one DAMOS scheme per sysfs scheme description on @ctx.  On a
 * scheme-build failure, all schemes added so far are destroyed.
 * NOTE(review): the failure check guarding the cleanup loop and the
 * return statements are elided in this extract.
 */
1830 static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
1831 struct damon_sysfs_schemes *sysfs_schemes)
1835 for (i = 0; i < sysfs_schemes->nr; i++) {
1836 struct damos *scheme, *next;
1838 scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
1840 damon_for_each_scheme_safe(scheme, next, ctx)
1841 damon_destroy_scheme(scheme);
1844 damon_add_scheme(ctx, scheme);
/*
 * Called by DAMON core right before the kdamond terminates.  For vaddr
 * ops, drops the targets (and presumably their pid references, on the
 * elided line) under kdamond_lock.
 */
1849 static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
1851 struct damon_target *t, *next;
1853 if (ctx->ops.id != DAMON_OPS_VADDR)
1856 mutex_lock(&ctx->kdamond_lock);
1857 damon_for_each_target_safe(t, next, ctx) {
1859 damon_destroy_target(t);
1861 mutex_unlock(&ctx->kdamond_lock);
/*
 * Translate a whole sysfs context description into a fresh damon_ctx:
 * select ops, then apply attrs, targets and schemes in order.  Returns
 * an ERR_PTR on failure after destroying the partially built context.
 * NOTE(review): the per-step 'if (err) goto out' lines are elided in
 * this extract.
 */
1864 static struct damon_ctx *damon_sysfs_build_ctx(
1865 struct damon_sysfs_context *sys_ctx)
1867 struct damon_ctx *ctx = damon_new_ctx();
1871 return ERR_PTR(-ENOMEM);
1873 err = damon_select_ops(ctx, sys_ctx->ops_id);
1876 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
1879 err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
1882 err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
/* hook cleanup of vaddr targets into kdamond termination */
1886 ctx->callback.before_terminate = damon_sysfs_before_terminate;
1890 damon_destroy_ctx(ctx);
1891 return ERR_PTR(err);
/*
 * Start DAMON for @kdamond.  Rejects if already running or if the
 * number of contexts is not exactly one; frees any context left over
 * from a previous run, builds a fresh one, and starts the kdamond.
 * NOTE(review): -EBUSY/-EINVAL return lines are elided in this extract.
 */
1894 static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
1896 struct damon_ctx *ctx;
1899 if (kdamond->damon_ctx &&
1900 damon_sysfs_ctx_running(kdamond->damon_ctx))
1902 /* TODO: support multiple contexts per kdamond */
1903 if (kdamond->contexts->nr != 1)
/* drop the context kept from the previous (now finished) run */
1906 if (kdamond->damon_ctx)
1907 damon_destroy_ctx(kdamond->damon_ctx);
1908 kdamond->damon_ctx = NULL;
1910 ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
1912 return PTR_ERR(ctx);
1913 err = damon_start(&ctx, 1, false);
1915 damon_destroy_ctx(ctx);
1918 kdamond->damon_ctx = ctx;
/*
 * Stop DAMON for @kdamond.  Returns -EPERM-style error if nothing is
 * running (exact code elided in this extract).
 */
1922 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
1924 if (!kdamond->damon_ctx)
1926 return damon_stop(&kdamond->damon_ctx, 1);
1928 * To allow users show final monitoring results of already turned-off
1929 * DAMON, we free kdamond->damon_ctx in next
1930 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
/*
 * Handle "on"/"off" writes to the state file by starting/stopping the
 * kdamond.  trylock keeps concurrent writers from blocking (presumably
 * returning -EBUSY on contention; the line is elided in this extract).
 */
1934 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
1935 const char *buf, size_t count)
1937 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1938 struct damon_sysfs_kdamond, kobj);
1941 if (!mutex_trylock(&damon_sysfs_lock))
1943 if (sysfs_streq(buf, "on"))
1944 ret = damon_sysfs_turn_damon_on(kdamond);
1945 else if (sysfs_streq(buf, "off"))
1946 ret = damon_sysfs_turn_damon_off(kdamond);
1949 mutex_unlock(&damon_sysfs_lock);
/*
 * Show the pid of the running kdamond thread.  Reads ctx->kdamond
 * under kdamond_lock; the no-context / not-running fallback value and
 * its checks are elided in this extract.
 */
1955 static ssize_t pid_show(struct kobject *kobj,
1956 struct kobj_attribute *attr, char *buf)
1958 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1959 struct damon_sysfs_kdamond, kobj);
1960 struct damon_ctx *ctx;
1963 if (!mutex_trylock(&damon_sysfs_lock))
1965 ctx = kdamond->damon_ctx;
1970 mutex_lock(&ctx->kdamond_lock);
1974 pid = ctx->kdamond->pid;
1975 mutex_unlock(&ctx->kdamond_lock);
1977 mutex_unlock(&damon_sysfs_lock);
1978 return sysfs_emit(buf, "%d\n", pid);
1981 static void damon_sysfs_kdamond_release(struct kobject *kobj)
1983 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
1984 struct damon_sysfs_kdamond, kobj);
1986 if (kdamond->damon_ctx)
1987 damon_destroy_ctx(kdamond->damon_ctx);
1988 kfree(container_of(kobj, struct damon_sysfs_kdamond, kobj));
/*
 * sysfs attribute and ktype wiring for a kdamond directory: writable
 * 'state' (on/off) and read-only 'pid'.
 */
1991 static struct kobj_attribute damon_sysfs_kdamond_state_attr =
1992 __ATTR_RW_MODE(state, 0600);
1994 static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
1995 __ATTR_RO_MODE(pid, 0400);
1997 static struct attribute *damon_sysfs_kdamond_attrs[] = {
1998 &damon_sysfs_kdamond_state_attr.attr,
1999 &damon_sysfs_kdamond_pid_attr.attr,
2002 ATTRIBUTE_GROUPS(damon_sysfs_kdamond);
2004 static struct kobj_type damon_sysfs_kdamond_ktype = {
2005 .release = damon_sysfs_kdamond_release,
2006 .sysfs_ops = &kobj_sysfs_ops,
2007 .default_groups = damon_sysfs_kdamond_groups,
2011 * kdamonds directory
/*
 * Container for all kdamond directories.  NOTE(review): an 'int nr'
 * count field used below appears elided from this extract.
 */
2014 struct damon_sysfs_kdamonds {
2015 struct kobject kobj;
2016 struct damon_sysfs_kdamond **kdamonds_arr;
/* Zero-initialized allocation; kobject initialized later by caller. */
2020 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2022 return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
/* Remove and drop every child kdamond directory, then free the array. */
2025 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2027 struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2030 for (i = 0; i < kdamonds->nr; i++) {
2031 damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2032 kobject_put(&kdamonds_arr[i]->kobj);
2035 kfree(kdamonds_arr);
2036 kdamonds->kdamonds_arr = NULL;
/*
 * Count kdamonds whose DAMON contexts are currently running; each
 * ctx->kdamond check is done under its kdamond_lock.  The increment
 * line inside the locked section is elided in this extract.
 */
2039 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2042 int nr_running_ctxs = 0;
2045 for (i = 0; i < nr_kdamonds; i++) {
2046 struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2050 mutex_lock(&ctx->kdamond_lock);
2053 mutex_unlock(&ctx->kdamond_lock);
2055 return nr_running_ctxs;
/*
 * Replace all kdamond directories with @nr_kdamonds fresh ones.
 * Refuses while any kdamond is running (presumably -EBUSY; the return
 * line is elided in this extract), since the old directories must be
 * torn down first.
 */
2058 static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
2061 struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
2064 if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
2067 damon_sysfs_kdamonds_rm_dirs(kdamonds);
/* __GFP_NOWARN: user-controlled count, don't spam dmesg on failure */
2071 kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
2072 GFP_KERNEL | __GFP_NOWARN);
2075 kdamonds->kdamonds_arr = kdamonds_arr;
2077 for (i = 0; i < nr_kdamonds; i++) {
2078 kdamond = damon_sysfs_kdamond_alloc();
2080 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2084 err = kobject_init_and_add(&kdamond->kobj,
2085 &damon_sysfs_kdamond_ktype, &kdamonds->kobj,
2090 err = damon_sysfs_kdamond_add_dirs(kdamond);
2094 kdamonds_arr[i] = kdamond;
/* error unwind: remove already-added children, drop the failing one */
2100 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2101 kobject_put(&kdamond->kobj);
/* Show the number of kdamond directories. */
2105 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2106 struct kobj_attribute *attr, char *buf)
2108 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2109 struct damon_sysfs_kdamonds, kobj);
2111 return sysfs_emit(buf, "%d\n", kdamonds->nr);
/*
 * Set the number of kdamond directories.  NOTE(review): the negative-
 * value rejection and the contended-lock / error return lines are
 * elided in this extract.
 */
2114 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2115 struct kobj_attribute *attr, const char *buf, size_t count)
2117 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2118 struct damon_sysfs_kdamonds, kobj);
2121 err = kstrtoint(buf, 0, &nr);
2127 if (!mutex_trylock(&damon_sysfs_lock))
2129 err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2130 mutex_unlock(&damon_sysfs_lock);
/* kobject release callback: frees the kdamonds container. */
2137 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2139 kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
/* sysfs attribute and ktype wiring for the kdamonds directory. */
2142 static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
2143 __ATTR_RW_MODE(nr_kdamonds, 0600);
2145 static struct attribute *damon_sysfs_kdamonds_attrs[] = {
2146 &damon_sysfs_kdamonds_nr_attr.attr,
2149 ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);
2151 static struct kobj_type damon_sysfs_kdamonds_ktype = {
2152 .release = damon_sysfs_kdamonds_release,
2153 .sysfs_ops = &kobj_sysfs_ops,
2154 .default_groups = damon_sysfs_kdamonds_groups,
2158 * damon user interface directory
/* Top-level 'admin' directory: owns the kdamonds container. */
2161 struct damon_sysfs_ui_dir {
2162 struct kobject kobj;
2163 struct damon_sysfs_kdamonds *kdamonds;
/* Zero-initialized allocation; kobject initialized later by caller. */
2166 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2168 return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
/*
 * Create the 'kdamonds' child directory under the admin dir.
 * NOTE(review): error checks and the directory-name argument are
 * elided in this extract; kobject_put() is presumably the failure path.
 */
2171 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2173 struct damon_sysfs_kdamonds *kdamonds;
2176 kdamonds = damon_sysfs_kdamonds_alloc();
2180 err = kobject_init_and_add(&kdamonds->kobj,
2181 &damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2184 kobject_put(&kdamonds->kobj);
2187 ui_dir->kdamonds = kdamonds;
/* kobject release callback: frees the admin-directory wrapper. */
2191 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2193 kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
/* No attributes of its own; directory exists only to hold children. */
2196 static struct attribute *damon_sysfs_ui_dir_attrs[] = {
2199 ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);
2201 static struct kobj_type damon_sysfs_ui_dir_ktype = {
2202 .release = damon_sysfs_ui_dir_release,
2203 .sysfs_ops = &kobj_sysfs_ops,
2204 .default_groups = damon_sysfs_ui_dir_groups,
/*
 * Module init: create /sys/kernel/mm/damon and its 'admin' child, then
 * populate the admin hierarchy.  On failure both kobjects are dropped.
 * NOTE(review): the allocation-failure and err checks between steps
 * are elided in this extract.
 */
2207 static int __init damon_sysfs_init(void)
2209 struct kobject *damon_sysfs_root;
2210 struct damon_sysfs_ui_dir *admin;
2213 damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
2214 if (!damon_sysfs_root)
2217 admin = damon_sysfs_ui_dir_alloc();
2219 kobject_put(damon_sysfs_root);
2222 err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
2223 damon_sysfs_root, "admin");
2226 err = damon_sysfs_ui_dir_add_dirs(admin);
/* error unwind: drop admin first, then the root directory */
2232 kobject_put(&admin->kobj);
2233 kobject_put(damon_sysfs_root);
2236 subsys_initcall(damon_sysfs_init);