// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
static bool running_exclusive_ctxs;

static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];

static struct kmem_cache *damon_region_cache __ro_after_init;

/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
	struct damon_operations empty_ops = {};

	if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops)))
		return false;
	return true;
}

/**
 * damon_is_registered_ops() - Check if a given damon_operations is registered.
 * @id:	Id of the damon_operations to check if registered.
 *
 * Return: true if the ops is set, false otherwise.
 */
bool damon_is_registered_ops(enum damon_ops_id id)
{
	bool registered;

	if (id >= NR_DAMON_OPS)
		return false;
	mutex_lock(&damon_ops_lock);
	registered = __damon_is_registered_ops(id);
	mutex_unlock(&damon_ops_lock);
	return registered;
}

/**
 * damon_register_ops() - Register a monitoring operations set to DAMON.
 * @ops:	monitoring operations set to register.
 *
 * This function registers a monitoring operations set of valid &struct
 * damon_operations->id so that others can find and use them later.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_register_ops(struct damon_operations *ops)
{
	int err = 0;

	if (ops->id >= NR_DAMON_OPS)
		return -EINVAL;
	mutex_lock(&damon_ops_lock);
	/* Fail for already registered ops */
	if (__damon_is_registered_ops(ops->id)) {
		err = -EINVAL;
		goto out;
	}
	damon_registered_ops[ops->id] = *ops;
out:
	mutex_unlock(&damon_ops_lock);
	return err;
}
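
/*
 * Example (an illustrative sketch, not part of this file): a monitoring
 * operations set implementer would fill a &struct damon_operations with its
 * callbacks and a free id, then register it.  The 'foo_*' callbacks below are
 * hypothetical names; registration fails with -EINVAL if the id is invalid
 * or already taken.
 *
 *	static struct damon_operations foo_ops = {
 *		.id = DAMON_OPS_FVADDR,
 *		.init = foo_init,
 *		.prepare_access_checks = foo_prepare_access_checks,
 *		.check_accesses = foo_check_accesses,
 *	};
 *
 *	if (damon_register_ops(&foo_ops))
 *		pr_err("foo ops registration failed\n");
 */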

/**
 * damon_select_ops() - Select a monitoring operations to use with the context.
 * @ctx:	monitoring context to use the operations.
 * @id:		id of the registered monitoring operations to select.
 *
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id)
{
	int err = 0;

	if (id >= NR_DAMON_OPS)
		return -EINVAL;

	mutex_lock(&damon_ops_lock);
	if (!__damon_is_registered_ops(id))
		err = -EINVAL;
	else
		ctx->ops = damon_registered_ops[id];
	mutex_unlock(&damon_ops_lock);
	return err;
}

/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kmem_cache_free(damon_region_cache, r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Fill holes in regions with new regions.
 */
static int damon_fill_regions_holes(struct damon_region *first,
		struct damon_region *last, struct damon_target *t)
{
	struct damon_region *r = first;

	damon_for_each_region_from(r, t) {
		struct damon_region *next, *newr;

		if (r == last)
			break;
		next = damon_next_region(r);
		if (r->ar.end != next->ar.start) {
			newr = damon_new_region(r->ar.end, next->ar.start);
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, r, next, t);
		}
	}
	return 0;
}

/**
 * damon_set_regions() - Set regions of a target for given address ranges.
 * @t:		the given target.
 * @ranges:	array of new monitoring target ranges.
 * @nr_ranges:	length of @ranges.
 *
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target so that they fit the given ranges.
 *
 * Return: 0 if success, or negative error code otherwise.
 */
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges)
{
	struct damon_region *r, *next;
	unsigned int i;
	int err;

	/* Remove regions which are not in the new ranges */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < nr_ranges; i++) {
			if (damon_intersect(r, &ranges[i]))
				break;
		}
		if (i == nr_ranges)
			damon_destroy_region(r, t);
	}

	r = damon_first_region(t);
	/* Add new regions or resize existing regions to fit in the ranges */
	for (i = 0; i < nr_ranges; i++) {
		struct damon_region *first = NULL, *last, *newr;
		struct damon_addr_range *range;

		range = &ranges[i];
		/* Get the first/last regions intersecting with the range */
		damon_for_each_region_from(r, t) {
			if (damon_intersect(r, range)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= range->end)
				break;
		}
		if (!first) {
			/* no region intersects with this range */
			newr = damon_new_region(
					ALIGN_DOWN(range->start,
						DAMON_MIN_REGION),
					ALIGN(range->end, DAMON_MIN_REGION));
			if (!newr)
				return -ENOMEM;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			/* resize intersecting regions to fit in this range */
			first->ar.start = ALIGN_DOWN(range->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);

			/* fill possible holes in the range */
			err = damon_fill_regions_holes(first, last, t);
			if (err)
				return err;
		}
	}
	return 0;
}
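
/*
 * Usage sketch (illustrative only; the addresses are arbitrary examples):
 * construct two ranges and let damon_set_regions() remove, resize, and fill
 * the regions of a target so that they exactly cover the ranges.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000000, .end = 0x180000000, },
 *		{ .start = 0x200000000, .end = 0x240000000, },
 *	};
 *	int err;
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 */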

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching)
{
	struct damos_filter *filter;

	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return NULL;
	filter->type = type;
	filter->matching = matching;
	INIT_LIST_HEAD(&filter->list);
	return filter;
}

void damos_add_filter(struct damos *s, struct damos_filter *f)
{
	list_add_tail(&f->list, &s->filters);
}

static void damos_del_filter(struct damos_filter *f)
{
	list_del(&f->list);
}

static void damos_free_filter(struct damos_filter *f)
{
	kfree(f);
}

void damos_destroy_filter(struct damos_filter *f)
{
	damos_del_filter(f);
	damos_free_filter(f);
}

/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
	quota->total_charged_sz = 0;
	quota->total_charged_ns = 0;
	quota->esz = 0;
	quota->charged_sz = 0;
	quota->charged_from = 0;
	quota->charge_target_from = NULL;
	quota->charge_addr_from = 0;
	return quota;
}

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
			enum damos_action action, struct damos_quota *quota,
			struct damos_watermarks *wmarks)
{
	struct damos *scheme;

	scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
	if (!scheme)
		return NULL;
	scheme->pattern = *pattern;
	scheme->action = action;
	INIT_LIST_HEAD(&scheme->filters);
	scheme->stat = (struct damos_stat){};
	INIT_LIST_HEAD(&scheme->list);

	scheme->quota = *(damos_quota_init_priv(quota));

	scheme->wmarks = *wmarks;
	scheme->wmarks.activated = true;

	return scheme;
}

void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
	list_add_tail(&s->list, &ctx->schemes);
}

static void damon_del_scheme(struct damos *s)
{
	list_del(&s->list);
}

static void damon_free_scheme(struct damos *s)
{
	kfree(s);
}

void damon_destroy_scheme(struct damos *s)
{
	struct damos_filter *f, *next;

	damos_for_each_filter_safe(f, next, s)
		damos_destroy_filter(f);
	damon_del_scheme(s);
	damon_free_scheme(s);
}

/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(void)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->pid = NULL;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);
	INIT_LIST_HEAD(&t->list);

	return t;
}

void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

bool damon_targets_empty(struct damon_ctx *ctx)
{
	return list_empty(&ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}

struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->attrs.sample_interval = 5 * 1000;
	ctx->attrs.aggr_interval = 100 * 1000;
	ctx->attrs.ops_update_interval = 60 * 1000 * 1000;

	ctx->passed_sample_intervals = 0;
	/* These will be set from kdamond_init_intervals_sis() */
	ctx->next_aggregation_sis = 0;
	ctx->next_ops_update_sis = 0;

	mutex_init(&ctx->kdamond_lock);

	ctx->attrs.min_nr_regions = 10;
	ctx->attrs.max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);
	INIT_LIST_HEAD(&ctx->schemes);

	return ctx;
}

static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->ops.cleanup) {
		ctx->ops.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	struct damos *s, *next_s;

	damon_destroy_targets(ctx);

	damon_for_each_scheme_safe(s, next_s, ctx)
		damon_destroy_scheme(s);

	kfree(ctx);
}

static unsigned int damon_age_for_new_attrs(unsigned int age,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
}

/* convert access ratio in bp (per 10,000) to nr_accesses */
static unsigned int damon_accesses_bp_to_nr_accesses(
		unsigned int accesses_bp, struct damon_attrs *attrs)
{
	unsigned int max_nr_accesses =
			attrs->aggr_interval / attrs->sample_interval;

	return accesses_bp * max_nr_accesses / 10000;
}

/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
		unsigned int nr_accesses, struct damon_attrs *attrs)
{
	unsigned int max_nr_accesses =
			attrs->aggr_interval / attrs->sample_interval;

	return nr_accesses * 10000 / max_nr_accesses;
}

static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	return damon_accesses_bp_to_nr_accesses(
			damon_nr_accesses_to_accesses_bp(
				nr_accesses, old_attrs),
			new_attrs);
}
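
/*
 * Worked example (assumed numbers): with an old sampling interval of 5 ms and
 * an old aggregation interval of 100 ms, max_nr_accesses is 20, so
 * nr_accesses == 10 converts to 10 * 10000 / 20 == 5000 bp (50%).  If the new
 * attributes use a 200 ms aggregation interval with the same sampling
 * interval, max_nr_accesses becomes 40, and the 5000 bp converts back to
 * 5000 * 40 / 10000 == 20, so the access ratio is preserved across the
 * attributes change.
 */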

static void damon_update_monitoring_result(struct damon_region *r,
		struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
{
	r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
			old_attrs, new_attrs);
	r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}

/*
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which an access to the region was found, and
 * region->age is the number of aggregation intervals for which its access
 * pattern has been maintained.  Hence, the real meaning of the two fields
 * depends on the current sampling interval and aggregation interval.  This
 * function updates ->nr_accesses and ->age of given damon_ctx's regions for
 * new damon_attrs.
 */
static void damon_update_monitoring_results(struct damon_ctx *ctx,
		struct damon_attrs *new_attrs)
{
	struct damon_attrs *old_attrs = &ctx->attrs;
	struct damon_target *t;
	struct damon_region *r;

	/* if any interval is zero, simply skip the conversion */
	if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
			!new_attrs->sample_interval ||
			!new_attrs->aggr_interval)
		return;

	damon_for_each_target(t, ctx)
		damon_for_each_region(r, t)
			damon_update_monitoring_result(
					r, old_attrs, new_attrs);
}

/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @attrs:		monitoring attributes
 *
 * This function should be called while the kdamond is not running, or an
 * access check results aggregation is not ongoing (e.g., from
 * &struct damon_callback->after_aggregation or
 * &struct damon_callback->after_wmarks_check callbacks).
 *
 * Every time interval is in micro-seconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
	unsigned long sample_interval = attrs->sample_interval ?
		attrs->sample_interval : 1;

	if (attrs->min_nr_regions < 3)
		return -EINVAL;
	if (attrs->min_nr_regions > attrs->max_nr_regions)
		return -EINVAL;
	if (attrs->sample_interval > attrs->aggr_interval)
		return -EINVAL;

	ctx->next_aggregation_sis = ctx->passed_sample_intervals +
		attrs->aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->passed_sample_intervals +
		attrs->ops_update_interval / sample_interval;

	damon_update_monitoring_results(ctx, attrs);
	ctx->attrs = *attrs;
	return 0;
}
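
/*
 * Usage sketch (illustrative only; the interval and region numbers are
 * arbitrary examples, in micro-seconds where applicable):
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *	int err;
 *
 *	err = damon_set_attrs(ctx, &attrs);
 */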

/**
 * damon_set_schemes() - Set data access monitoring based operation schemes.
 * @ctx:	monitoring context
 * @schemes:	array of the schemes
 * @nr_schemes:	number of entries in @schemes
 *
 * This function should not be called while the kdamond of the context is
 * running.
 */
void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
			ssize_t nr_schemes)
{
	struct damos *s, *next;
	ssize_t i;

	damon_for_each_scheme_safe(s, next, ctx)
		damon_destroy_scheme(s);
	for (i = 0; i < nr_schemes; i++)
		damon_add_scheme(ctx, schemes[i]);
}

/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}

/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
	}

	if (ctx->attrs.min_nr_regions)
		sz /= ctx->attrs.min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}

static int kdamond_fn(void *data);

/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}

/**
 * damon_start() - Starts the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 * @exclusive:	exclusiveness of this contexts group
 *
 * This function starts a group of monitoring threads for a group of monitoring
 * contexts.  One thread per context is created and run in parallel.  The
 * caller should handle synchronization between the threads by itself.  If
 * @exclusive is true and a group of threads created by another 'damon_start()'
 * call is currently running, this function does nothing but returns -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if ((exclusive && nr_running_ctxs) ||
			(!exclusive && running_exclusive_ctxs)) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	if (exclusive && nr_running_ctxs)
		running_exclusive_ctxs = true;
	mutex_unlock(&damon_lock);

	return err;
}

/*
 * __damon_stop() - Stops monitoring of a given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	struct task_struct *tsk;

	mutex_lock(&ctx->kdamond_lock);
	tsk = ctx->kdamond;
	if (tsk) {
		get_task_struct(tsk);
		mutex_unlock(&ctx->kdamond_lock);
		kthread_stop(tsk);
		put_task_struct(tsk);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}

/**
 * damon_stop() - Stops the monitorings for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			break;
	}
	return err;
}
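
/*
 * Typical lifecycle sketch (illustrative only, error handling elided): create
 * a context, bind an operations set and a target to it, then start and later
 * stop the kdamond.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	struct damon_target *t = damon_new_target();
 *
 *	damon_add_target(ctx, t);
 *	damon_select_ops(ctx, DAMON_OPS_PADDR);
 *	damon_start(&ctx, 1, true);
 *	...
 *	damon_stop(&ctx, 1);
 *	damon_destroy_ctx(ctx);
 */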

/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;
	unsigned int ti = 0;	/* target's index */

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(ti, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
		ti++;
	}
}

static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r);

static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
	unsigned long sz;

	sz = damon_sz_region(r);
	return s->pattern.min_sz_region <= sz &&
		sz <= s->pattern.max_sz_region &&
		s->pattern.min_nr_accesses <= r->nr_accesses &&
		r->nr_accesses <= s->pattern.max_nr_accesses &&
		s->pattern.min_age_region <= r->age &&
		r->age <= s->pattern.max_age_region;
}

static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	bool ret = __damos_valid_target(r, s);

	if (!ret || !s->quota.esz || !c->ops.get_scheme_score)
		return ret;

	return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
}

/*
 * damos_skip_charged_region() - Check if the given region or starting part of
 * it is already charged for the DAMOS quota.
 * @t:	The target of the region.
 * @rp:	The pointer to the region.
 * @s:	The scheme to be applied.
 *
 * If the quota of a scheme has been exceeded in a quota charge window, the
 * scheme's action would be applied to only a part of the target access
 * pattern fulfilling regions.  To avoid applying the scheme action to only
 * already applied regions, DAMON skips applying the scheme action to the
 * regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped for that reason.
 * If only the starting part of the region was previously charged, this
 * function splits the region into two so that the second one covers the area
 * that was not charged in the previous charge window, saves the second region
 * in *rp, and returns false, so that the caller can apply the DAMOS action to
 * the second one.
 *
 * Return: true if the region should be entirely skipped, false otherwise.
 */
static bool damos_skip_charged_region(struct damon_target *t,
		struct damon_region **rp, struct damos *s)
{
	struct damon_region *r = *rp;
	struct damos_quota *quota = &s->quota;
	unsigned long sz_to_skip;

	/* Skip previously charged regions */
	if (quota->charge_target_from) {
		if (t != quota->charge_target_from)
			return true;
		if (r == damon_last_region(t)) {
			quota->charge_target_from = NULL;
			quota->charge_addr_from = 0;
			return true;
		}
		if (quota->charge_addr_from &&
				r->ar.end <= quota->charge_addr_from)
			return true;

		if (quota->charge_addr_from && r->ar.start <
				quota->charge_addr_from) {
			sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
					r->ar.start, DAMON_MIN_REGION);
			if (!sz_to_skip) {
				if (damon_sz_region(r) <= DAMON_MIN_REGION)
					return true;
				sz_to_skip = DAMON_MIN_REGION;
			}
			damon_split_region_at(t, r, sz_to_skip);
			r = damon_next_region(r);
			*rp = r;
		}
		quota->charge_target_from = NULL;
		quota->charge_addr_from = 0;
	}
	return false;
}

static void damos_update_stat(struct damos *s,
		unsigned long sz_tried, unsigned long sz_applied)
{
	s->stat.nr_tried++;
	s->stat.sz_tried += sz_tried;
	if (sz_applied)
		s->stat.nr_applied++;
	s->stat.sz_applied += sz_applied;
}

static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos_filter *filter)
{
	bool matched = false;
	struct damon_target *ti;
	int target_idx = 0;
	unsigned long start, end;

	switch (filter->type) {
	case DAMOS_FILTER_TYPE_TARGET:
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			target_idx++;
		}
		matched = target_idx == filter->target_idx;
		break;
	case DAMOS_FILTER_TYPE_ADDR:
		start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION);
		end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION);

		/* inside the range */
		if (start <= r->ar.start && r->ar.end <= end) {
			matched = true;
			break;
		}
		/* outside of the range */
		if (r->ar.end <= start || end <= r->ar.start) {
			matched = false;
			break;
		}
		/* start before the range and overlap */
		if (r->ar.start < start) {
			damon_split_region_at(t, r, start - r->ar.start);
			matched = false;
			break;
		}
		/* start inside the range */
		damon_split_region_at(t, r, end - r->ar.start);
		matched = true;
		break;
	default:
		break;
	}

	return matched == filter->matching;
}

static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_filter *filter;

	damos_for_each_filter(filter, s) {
		if (__damos_filter_out(ctx, t, r, filter))
			return true;
	}
	return false;
}

static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
		struct damon_region *r, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	unsigned long sz = damon_sz_region(r);
	struct timespec64 begin, end;
	unsigned long sz_applied = 0;
	int err = 0;
	/*
	 * We plan to support multiple contexts per kdamond, as DAMON sysfs
	 * implies with its 'nr_contexts' file.  Nevertheless, only a single
	 * context per kdamond is supported for now.  So, we can simply use
	 * '0' as the context index here.
	 */
	unsigned int cidx = 0;
	struct damos *siter;		/* schemes iterator */
	unsigned int sidx = 0;
	struct damon_target *titer;	/* targets iterator */
	unsigned int tidx = 0;
	bool do_trace = false;

	/* get indices for trace_damos_before_apply() */
	if (trace_damos_before_apply_enabled()) {
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
		}
		do_trace = true;
	}

	if (c->ops.apply_scheme) {
		if (quota->esz && quota->charged_sz + sz > quota->esz) {
			sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
					DAMON_MIN_REGION);
			if (!sz)
				goto update_stat;
			damon_split_region_at(t, r, sz);
		}
		if (damos_filter_out(c, t, r, s))
			return;
		ktime_get_coarse_ts64(&begin);
		if (c->callback.before_damos_apply)
			err = c->callback.before_damos_apply(c, t, r, s);
		if (!err) {
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t), do_trace);
			sz_applied = c->ops.apply_scheme(c, t, r, s);
		}
		ktime_get_coarse_ts64(&end);
		quota->total_charged_ns += timespec64_to_ns(&end) -
			timespec64_to_ns(&begin);
		quota->charged_sz += sz;
		if (quota->esz && quota->charged_sz >= quota->esz) {
			quota->charge_target_from = t;
			quota->charge_addr_from = r->ar.end + 1;
		}
	}
	if (s->action != DAMOS_STAT)
		r->age = 0;

update_stat:
	damos_update_stat(s, sz, sz_applied);
}

static void damon_do_apply_schemes(struct damon_ctx *c,
				   struct damon_target *t,
				   struct damon_region *r)
{
	struct damos *s;

	damon_for_each_scheme(s, c) {
		struct damos_quota *quota = &s->quota;

		if (!s->wmarks.activated)
			continue;

		/* Check the quota */
		if (quota->esz && quota->charged_sz >= quota->esz)
			continue;

		if (damos_skip_charged_region(t, &r, s))
			continue;

		if (!damos_valid_target(c, t, r, s))
			continue;

		damos_apply_scheme(c, t, r, s);
	}
}

/* Shouldn't be called if quota->ms and quota->sz are zero */
static void damos_set_effective_quota(struct damos_quota *quota)
{
	unsigned long throughput;
	unsigned long esz;

	if (!quota->ms) {
		quota->esz = quota->sz;
		return;
	}

	if (quota->total_charged_ns)
		throughput = quota->total_charged_sz * 1000000 /
			quota->total_charged_ns;
	else
		throughput = PAGE_SIZE * 1024;
	esz = throughput * quota->ms;

	if (quota->sz && quota->sz < esz)
		esz = quota->sz;
	quota->esz = esz;
}
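
/*
 * Worked example (assumed numbers): if 4 MiB (4194304 bytes) was charged over
 * 2,000,000 ns in total, the measured throughput is
 * 4194304 * 1000000 / 2000000 == 2097152 bytes per millisecond, so a 10 ms
 * time quota converts to a 20 MiB effective size quota.  If the user also set
 * a 16 MiB size quota, the smaller one (16 MiB) is used as the effective
 * quota.
 */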

static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
{
	struct damos_quota *quota = &s->quota;
	struct damon_target *t;
	struct damon_region *r;
	unsigned long cumulated_sz;
	unsigned int score, max_score = 0;

	if (!quota->ms && !quota->sz)
		return;

	/* New charge window starts */
	if (time_after_eq(jiffies, quota->charged_from +
				msecs_to_jiffies(quota->reset_interval))) {
		if (quota->esz && quota->charged_sz >= quota->esz)
			s->stat.qt_exceeds++;
		quota->total_charged_sz += quota->charged_sz;
		quota->charged_from = jiffies;
		quota->charged_sz = 0;
		damos_set_effective_quota(quota);
	}

	if (!c->ops.get_scheme_score)
		return;

	/* Fill up the score histogram */
	memset(quota->histogram, 0, sizeof(quota->histogram));
	damon_for_each_target(t, c) {
		damon_for_each_region(r, t) {
			if (!__damos_valid_target(r, s))
				continue;
			score = c->ops.get_scheme_score(c, t, r, s);
			quota->histogram[score] += damon_sz_region(r);
			if (score > max_score)
				max_score = score;
		}
	}

	/* Set the min score limit */
	for (cumulated_sz = 0, score = max_score; ; score--) {
		cumulated_sz += quota->histogram[score];
		if (cumulated_sz >= quota->esz || !score)
			break;
	}
	quota->min_score = score;
}

static void kdamond_apply_schemes(struct damon_ctx *c)
{
	struct damon_target *t;
	struct damon_region *r, *next_r;
	struct damos *s;

	damon_for_each_scheme(s, c) {
		if (!s->wmarks.activated)
			continue;

		damos_adjust_quota(c, s);
	}

	damon_for_each_target(t, c) {
		damon_for_each_region_safe(r, next_r, t)
			damon_do_apply_schemes(c, t, r);
	}
}

/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
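
/*
 * Worked example (assumed numbers): merging a 12 KiB region with
 * nr_accesses == 10 and an adjacent 4 KiB region with nr_accesses == 2 keeps
 * a size-weighted average of the two access frequencies:
 * (10 * 12288 + 2 * 4096) / (12288 + 4096) == 8.
 */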

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    abs(prev->nr_accesses - r->nr_accesses) <= thres &&
		    damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}

/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and whose
 * access frequencies are similar.  This is for minimizing the monitoring
 * overhead under the dynamically changeable access pattern.  If a merge was
 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}

/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_target *t,
				  struct damon_region *r, unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}

/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = damon_sz_region(r);

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10% and at most 90% of the original region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}

/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small regions if
 * the current total number of the regions is equal to or smaller than half of
 * the user-specified maximum number of regions.  This is for maximizing the
 * monitoring accuracy under the dynamically changeable access patterns.  If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->attrs.max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->attrs.max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(t, nr_subregions);

	last_nr_regions = nr_regions;
}

/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or all
 * monitoring targets are invalid.
 *
 * Returns true if the current monitoring should be stopped.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;

	if (kthread_should_stop())
		return true;

	if (!ctx->ops.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->ops.target_valid(t))
			return false;
	}

	return true;
}

static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
	struct sysinfo i;

	switch (metric) {
	case DAMOS_WMARK_FREE_MEM_RATE:
		si_meminfo(&i);
		return i.freeram * 1000 / i.totalram;
	default:
		break;
	}
	return -EINVAL;
}

/*
 * Returns zero if the scheme is active.  Else, returns time to wait for next
 * watermark check in micro-seconds.
 */
static unsigned long damos_wmark_wait_us(struct damos *scheme)
{
	unsigned long metric;

	if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
		return 0;

	metric = damos_wmark_metric_value(scheme->wmarks.metric);
	/* higher than high watermark or lower than low watermark */
	if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
		if (scheme->wmarks.activated)
			pr_debug("deactivate a scheme (%d) for %s wmark\n",
					scheme->action,
					metric > scheme->wmarks.high ?
					"high" : "low");
		scheme->wmarks.activated = false;
		return scheme->wmarks.interval;
	}

	/* inactive and higher than middle watermark */
	if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
			!scheme->wmarks.activated)
		return scheme->wmarks.interval;

	if (!scheme->wmarks.activated)
		pr_debug("activate a scheme (%d)\n", scheme->action);
	scheme->wmarks.activated = true;
	return 0;
}

static void kdamond_usleep(unsigned long usecs)
{
	/* See Documentation/timers/timers-howto.rst for the thresholds */
	if (usecs > 20 * USEC_PER_MSEC)
		schedule_timeout_idle(usecs_to_jiffies(usecs));
	else
		usleep_idle_range(usecs, usecs + 1);
}

/* Returns negative error code if it's not activated but should return */
static int kdamond_wait_activation(struct damon_ctx *ctx)
{
	struct damos *s;
	unsigned long wait_time;
	unsigned long min_wait_time = 0;
	bool init_wait_time = false;

	while (!kdamond_need_stop(ctx)) {
		damon_for_each_scheme(s, ctx) {
			wait_time = damos_wmark_wait_us(s);
			if (!init_wait_time || wait_time < min_wait_time) {
				init_wait_time = true;
				min_wait_time = wait_time;
			}
		}
		if (!min_wait_time)
			return 0;

		kdamond_usleep(min_wait_time);

		if (ctx->callback.after_wmarks_check &&
				ctx->callback.after_wmarks_check(ctx))
			break;
	}
	return -EBUSY;
}

static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
{
	unsigned long sample_interval = ctx->attrs.sample_interval ?
		ctx->attrs.sample_interval : 1;

	ctx->passed_sample_intervals = 0;
	ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
	ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
		sample_interval;
}

/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	kdamond_init_intervals_sis(ctx);

	if (ctx->ops.init)
		ctx->ops.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		goto done;

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		/*
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed from after_wmarks_check() or after_aggregation()
		 * callbacks.  Read the values here, and use those for this
		 * iteration.  That is, values updated via damon_set_attrs()
		 * are respected from the next iteration.
		 */
		unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
		unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
		unsigned long sample_interval = ctx->attrs.sample_interval;

		if (kdamond_wait_activation(ctx))
			break;

		if (ctx->ops.prepare_access_checks)
			ctx->ops.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			break;

		kdamond_usleep(sample_interval);
		ctx->passed_sample_intervals++;

		if (ctx->ops.check_accesses)
			max_nr_accesses = ctx->ops.check_accesses(ctx);

		sample_interval = ctx->attrs.sample_interval ?
			ctx->attrs.sample_interval : 1;
		if (ctx->passed_sample_intervals == next_aggregation_sis) {
			ctx->next_aggregation_sis = next_aggregation_sis +
				ctx->attrs.aggr_interval / sample_interval;
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				break;
			if (!list_empty(&ctx->schemes))
				kdamond_apply_schemes(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->ops.reset_aggregated)
				ctx->ops.reset_aggregated(ctx);
		}

		if (ctx->passed_sample_intervals == next_ops_update_sis) {
			ctx->next_ops_update_sis = next_ops_update_sis +
				ctx->attrs.ops_update_interval /
				sample_interval;
			if (ctx->ops.update)
				ctx->ops.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
done:
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate)
		ctx->callback.before_terminate(ctx);
	if (ctx->ops.cleanup)
		ctx->ops.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	if (!nr_running_ctxs && running_exclusive_ctxs)
		running_exclusive_ctxs = false;
	mutex_unlock(&damon_lock);

	return 0;
}

/*
 * struct damon_system_ram_region - System RAM resource address region of
 *				    [@start, @end).
 * @start:	Start address of the region (inclusive).
 * @end:	End address of the region (exclusive).
 */
struct damon_system_ram_region {
	unsigned long start;
	unsigned long end;
};

static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_system_ram_region *a = arg;

	if (a->end - a->start < resource_size(res)) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively.  If no System RAM is found, returns false.
 */
static bool damon_find_biggest_system_ram(unsigned long *start,
						unsigned long *end)
{
	struct damon_system_ram_region arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

/**
 * damon_set_region_biggest_system_ram_default() - Set the region of the given
 *	monitoring target as requested, or biggest 'System RAM'.
 * @t:		The monitoring target to set the region.
 * @start:	The pointer to the start address of the region.
 * @end:	The pointer to the end address of the region.
 *
 * This function sets the region of @t as requested by @start and @end.  If the
 * values of @start and @end are zero, however, this function finds the biggest
 * 'System RAM' resource and sets the region to cover the resource.  In the
 * latter case, this function saves the start and end addresses of the resource
 * in @start and @end, respectively.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
			unsigned long *start, unsigned long *end)
{
	struct damon_addr_range addr_range;

	if (*start > *end)
		return -EINVAL;

	if (!*start && !*end &&
	    !damon_find_biggest_system_ram(start, end))
		return -EINVAL;

	addr_range.start = *start;
	addr_range.end = *end;
	return damon_set_regions(t, &addr_range, 1);
}
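
/*
 * Usage sketch (illustrative only): passing zeroed bounds makes the function
 * pick the biggest 'System RAM' resource and report the chosen range back.
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	if (!err)
 *		pr_debug("monitoring [%lu, %lu)\n", start, end);
 */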

/**
 * damon_moving_sum() - Calculate an inferred moving sum value.
 * @mvsum:	Inferred sum of the last @len_window values.
 * @nomvsum:	Non-moving sum of the last discrete @len_window window values.
 * @len_window:	The number of last values to take care of.
 * @new_value:	New value that will be added to the pseudo moving sum.
 *
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for arbitrary window size.  This
 * function implements a lightweight pseudo moving sum function that doesn't
 * keep the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window.  @nomvsum is a
 * non-moving sum of the last window.  For example, if @len_window is 10 and we
 * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
 * values.  Hence, this function simply drops @nomvsum / @len_window from
 * given @mvsum and adds @new_value.
 *
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values for
 * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.  For
 * calculating the next moving sum with a new value, we should drop 0 from 50
 * and add the new value.  However, this function assumes it got value 5 for
 * each of the last ten times.  Based on the assumption, when the next value
 * is measured, it drops the assumed past value, 5, from the current sum, and
 * adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear for every
 * @len_window aligned calls.  For example, if @len_window is 10, the pseudo
 * moving sum with 11th value to 19th value would have an error.  But the sum
 * with 20th value will not have the error.
 *
 * Return: Pseudo-moving sum after getting the @new_value.
 */
unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}
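
/*
 * Worked example (assumed numbers): with @len_window == 10 and @nomvsum == 50,
 * the assumed per-interval past value is 50 / 10 == 5.  If the current @mvsum
 * is 60 and the @new_value is 9, the updated pseudo moving sum is
 * 60 - 5 + 9 == 64.
 */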

/**
 * damon_update_region_access_rate() - Update the access rate of a region.
 * @r:		The DAMON region to update for its access check result.
 * @accessed:	Whether the region has been accessed during the last sampling
 *		interval.
 *
 * Update the access rate of a region with the region's last sampling interval
 * access check result.
 *
 * Usually this will be called by &damon_operations->check_accesses callback.
 */
void damon_update_region_access_rate(struct damon_region *r, bool accessed)
{
	if (accessed)
		r->nr_accesses++;
}

static int __init damon_init(void)
{
	damon_region_cache = KMEM_CACHE(damon_region, 0);
	if (unlikely(!damon_region_cache)) {
		pr_err("creating damon_region_cache fails\n");
		return -ENOMEM;
	}

	return 0;
}

subsys_initcall(damon_init);

#ifdef CONFIG_DAMON_KUNIT_TEST
#include "core-test.h"
#endif