// SPDX-License-Identifier: GPL-2.0
/*
 * Data Access Monitor
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon: " fmt

#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/damon.h>

#ifdef CONFIG_DAMON_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
/* Get a random number in [l, r) */
#define damon_rand(l, r) (l + prandom_u32_max(r - l))

static DEFINE_MUTEX(damon_lock);
static int nr_running_ctxs;
/*
 * Construct a damon_region struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
	struct damon_region *region;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	region->ar.start = start;
	region->ar.end = end;
	region->nr_accesses = 0;
	INIT_LIST_HEAD(&region->list);

	region->age = 0;
	region->last_nr_accesses = 0;

	return region;
}
/*
 * Add a region between two other regions
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}

void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
	t->nr_regions++;
}

static void damon_del_region(struct damon_region *r, struct damon_target *t)
{
	list_del(&r->list);
	t->nr_regions--;
}

static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}

void damon_destroy_region(struct damon_region *r, struct damon_target *t)
{
	damon_del_region(r, t);
	damon_free_region(r);
}
/*
 * Construct a damon_target struct
 *
 * Returns the pointer to the new struct if success, or NULL otherwise
 */
struct damon_target *damon_new_target(unsigned long id)
{
	struct damon_target *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	t->id = id;
	t->nr_regions = 0;
	INIT_LIST_HEAD(&t->regions_list);

	return t;
}
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->adaptive_targets);
}

static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}

void damon_free_target(struct damon_target *t)
{
	struct damon_region *r, *next;

	damon_for_each_region_safe(r, next, t)
		damon_free_region(r);
	kfree(t);
}

void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}

unsigned int damon_nr_regions(struct damon_target *t)
{
	return t->nr_regions;
}
struct damon_ctx *damon_new_ctx(void)
{
	struct damon_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* Default intervals, in microseconds */
	ctx->sample_interval = 5 * 1000;
	ctx->aggr_interval = 100 * 1000;
	ctx->primitive_update_interval = 60 * 1000 * 1000;

	ktime_get_coarse_ts64(&ctx->last_aggregation);
	ctx->last_primitive_update = ctx->last_aggregation;

	mutex_init(&ctx->kdamond_lock);

	ctx->min_nr_regions = 10;
	ctx->max_nr_regions = 1000;

	INIT_LIST_HEAD(&ctx->adaptive_targets);

	return ctx;
}
static void damon_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next_t;

	if (ctx->primitive.cleanup) {
		ctx->primitive.cleanup(ctx);
		return;
	}

	damon_for_each_target_safe(t, next_t, ctx)
		damon_destroy_target(t);
}

void damon_destroy_ctx(struct damon_ctx *ctx)
{
	damon_destroy_targets(ctx);
	kfree(ctx);
}
/**
 * damon_set_targets() - Set monitoring targets.
 * @ctx:	monitoring context
 * @ids:	array of target ids
 * @nr_ids:	number of entries in @ids
 *
 * This function should not be called while the kdamond is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_targets(struct damon_ctx *ctx,
		      unsigned long *ids, ssize_t nr_ids)
{
	ssize_t i;
	struct damon_target *t, *next;

	damon_destroy_targets(ctx);

	for (i = 0; i < nr_ids; i++) {
		t = damon_new_target(ids[i]);
		if (!t) {
			pr_err("Failed to alloc damon_target\n");
			/* The caller should do cleanup of the ids itself */
			damon_for_each_target_safe(t, next, ctx)
				damon_destroy_target(t);
			return -ENOMEM;
		}
		damon_add_target(ctx, t);
	}

	return 0;
}
/**
 * damon_set_attrs() - Set attributes for the monitoring.
 * @ctx:		monitoring context
 * @sample_int:		time interval between samplings
 * @aggr_int:		time interval between aggregations
 * @primitive_upd_int:	time interval between monitoring primitive updates
 * @min_nr_reg:		minimal number of regions
 * @max_nr_reg:		maximum number of regions
 *
 * This function should not be called while the kdamond is running.
 * Every time interval is in microseconds.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
		    unsigned long aggr_int, unsigned long primitive_upd_int,
		    unsigned long min_nr_reg, unsigned long max_nr_reg)
{
	if (min_nr_reg < 3) {
		pr_err("min_nr_regions (%lu) must be at least 3\n",
				min_nr_reg);
		return -EINVAL;
	}
	if (min_nr_reg > max_nr_reg) {
		pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
				min_nr_reg, max_nr_reg);
		return -EINVAL;
	}

	ctx->sample_interval = sample_int;
	ctx->aggr_interval = aggr_int;
	ctx->primitive_update_interval = primitive_upd_int;
	ctx->min_nr_regions = min_nr_reg;
	ctx->max_nr_regions = max_nr_reg;

	return 0;
}
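/*
 * Usage sketch (illustrative only, not part of the kernel source): a
 * typical caller pairs damon_set_attrs() with damon_set_targets() before
 * starting the context.  The target id and the interval values below are
 * arbitrary assumptions for demonstration.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	unsigned long ids[] = {1234};
 *	int err;
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	err = damon_set_attrs(ctx, 5000, 100000, 60 * 1000 * 1000, 10, 1000);
 *	if (!err)
 *		err = damon_set_targets(ctx, ids, 1);
 *	if (!err)
 *		err = damon_start(&ctx, 1);
 *
 * All intervals are in microseconds, so this samples every 5ms, aggregates
 * every 100ms, and updates the monitoring primitives every minute.
 */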
/**
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 */
int damon_nr_running_ctxs(void)
{
	int nr_ctxs;

	mutex_lock(&damon_lock);
	nr_ctxs = nr_running_ctxs;
	mutex_unlock(&damon_lock);

	return nr_ctxs;
}
/* Returns the size upper limit for each monitoring region */
static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sz = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			sz += r->ar.end - r->ar.start;
	}

	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	return sz;
}
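/*
 * Worked example (numbers are illustrative): if the targets cover 300 MiB
 * in total and ctx->min_nr_regions is 10, the limit is 30 MiB, so merging
 * can never shrink the context below ten regions.
 */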
static bool damon_kdamond_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);

	return running;
}

static int kdamond_fn(void *data);
/*
 * __damon_start() - Starts monitoring with given context.
 * @ctx:	monitoring context
 *
 * This function should be called while damon_lock is held.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_start(struct damon_ctx *ctx)
{
	int err = -EBUSY;

	mutex_lock(&ctx->kdamond_lock);
	if (!ctx->kdamond) {
		err = 0;
		ctx->kdamond_stop = false;
		ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
				nr_running_ctxs);
		if (IS_ERR(ctx->kdamond)) {
			err = PTR_ERR(ctx->kdamond);
			ctx->kdamond = NULL;
		}
	}
	mutex_unlock(&ctx->kdamond_lock);

	return err;
}
/**
 * damon_start() - Starts monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to start monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts.  One thread per context is created and run in
 * parallel.  The caller should handle synchronization between the threads
 * by itself.  If a group of threads created by another 'damon_start()'
 * call is currently running, this function does nothing but returns
 * -EBUSY.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i;
	int err = 0;

	mutex_lock(&damon_lock);
	if (nr_running_ctxs) {
		mutex_unlock(&damon_lock);
		return -EBUSY;
	}

	for (i = 0; i < nr_ctxs; i++) {
		err = __damon_start(ctxs[i]);
		if (err)
			break;
		nr_running_ctxs++;
	}
	mutex_unlock(&damon_lock);

	return err;
}
/*
 * __damon_stop() - Stops monitoring of given context.
 * @ctx:	monitoring context
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int __damon_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond) {
		ctx->kdamond_stop = true;
		mutex_unlock(&ctx->kdamond_lock);
		while (damon_kdamond_running(ctx))
			usleep_range(ctx->sample_interval,
					ctx->sample_interval * 2);
		return 0;
	}
	mutex_unlock(&ctx->kdamond_lock);

	return -EPERM;
}
/**
 * damon_stop() - Stops monitoring for a given group of contexts.
 * @ctxs:	an array of the pointers for contexts to stop monitoring
 * @nr_ctxs:	size of @ctxs
 *
 * Return: 0 on success, negative error code otherwise.
 */
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
	int i, err = 0;

	for (i = 0; i < nr_ctxs; i++) {
		/* nr_running_ctxs is decremented in kdamond_fn */
		err = __damon_stop(ctxs[i]);
		if (err)
			return err;
	}

	return err;
}
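/*
 * Usage sketch (illustrative only): stopping the group started in the
 * damon_set_attrs() example above.  The kdamond threads decrement
 * nr_running_ctxs on their own exit, so the caller need not touch the
 * counter.
 *
 *	err = damon_stop(&ctx, 1);
 */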
/*
 * damon_check_reset_time_interval() - Check if a time interval has elapsed.
 * @baseline:	the time to check whether the interval has elapsed since
 * @interval:	the time interval (microseconds)
 *
 * See whether the given time interval has passed since the given baseline
 * time.  If so, it also updates the baseline to the current time for the
 * next check.
 *
 * Return: true if the time interval has passed, or false otherwise.
 */
static bool damon_check_reset_time_interval(struct timespec64 *baseline,
		unsigned long interval)
{
	struct timespec64 now;

	ktime_get_coarse_ts64(&now);
	/* @interval is in microseconds while the timespec64 diff is in ns */
	if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
			interval * 1000)
		return false;
	*baseline = now;
	return true;
}
/*
 * Check whether it is time to flush the aggregated information
 */
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_aggregation,
			ctx->aggr_interval);
}
/*
 * Reset the aggregated monitoring results ('nr_accesses' of each region).
 */
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
	struct damon_target *t;

	damon_for_each_target(t, c) {
		struct damon_region *r;

		damon_for_each_region(r, t) {
			trace_damon_aggregated(t, r, damon_nr_regions(t));
			r->last_nr_accesses = r->nr_accesses;
			r->nr_accesses = 0;
		}
	}
}

#define sz_damon_region(r) (r->ar.end - r->ar.start)
/*
 * Merge two adjacent regions into one region
 */
static void damon_merge_two_regions(struct damon_target *t,
		struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);

	/* Keep size-weighted averages of the access frequency and the age */
	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			(sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
	damon_destroy_region(r, t);
}
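/*
 * Worked example of the size-weighted averaging above (numbers are
 * illustrative): merging a 4 KiB region with nr_accesses == 2 into an
 * adjacent 12 KiB region with nr_accesses == 6 yields
 * (2 * 4096 + 6 * 12288) / 16384 == 5 for the combined region.
 */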
#define diff_of(a, b) (a > b ? a - b : b - a)

/*
 * Merge adjacent regions having similar access frequencies
 *
 * t		target affected by this merge operation
 * thres	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 */
static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
				   unsigned long sz_limit)
{
	struct damon_region *r, *prev = NULL, *next;

	damon_for_each_region_safe(r, next, t) {
		if (diff_of(r->nr_accesses, r->last_nr_accesses) > thres)
			r->age = 0;
		else
			r->age++;

		if (prev && prev->ar.end == r->ar.start &&
		    diff_of(prev->nr_accesses, r->nr_accesses) <= thres &&
		    sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
			damon_merge_two_regions(t, prev, r);
		else
			prev = r;
	}
}
/*
 * Merge adjacent regions having similar access frequencies
 *
 * threshold	'->nr_accesses' diff threshold for the merge
 * sz_limit	size upper limit of each region
 *
 * This function merges monitoring target regions which are adjacent and
 * whose access frequencies are similar.  This is for minimizing the
 * monitoring overhead under dynamically changing access patterns.  If a
 * merge was unnecessarily made, later 'kdamond_split_regions()' will
 * revert it.
 */
static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
				  unsigned long sz_limit)
{
	struct damon_target *t;

	damon_for_each_target(t, c)
		damon_merge_regions_of(t, threshold, sz_limit);
}
/*
 * Split a region in two
 *
 * r		the region to be split
 * sz_r		size of the first sub-region that will be made
 */
static void damon_split_region_at(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		unsigned long sz_r)
{
	struct damon_region *new;

	new = damon_new_region(r->ar.start + sz_r, r->ar.end);
	if (!new)
		return;

	r->ar.end = new->ar.start;

	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;

	damon_insert_region(new, r, damon_next_region(r), t);
}
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_ctx *ctx,
				   struct damon_target *t, int nr_subs)
{
	struct damon_region *r, *next;
	unsigned long sz_region, sz_sub = 0;
	int i;

	damon_for_each_region_safe(r, next, t) {
		sz_region = r->ar.end - r->ar.start;

		for (i = 0; i < nr_subs - 1 &&
				sz_region > 2 * DAMON_MIN_REGION; i++) {
			/*
			 * Randomly select size of left sub-region to be at
			 * least 10 percent and at most 90% of the original
			 * region
			 */
			sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
					sz_region / 10, DAMON_MIN_REGION);
			/* Do not allow blank region */
			if (sz_sub == 0 || sz_sub >= sz_region)
				continue;

			damon_split_region_at(ctx, t, r, sz_sub);
			sz_region = sz_sub;
		}
	}
}
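/*
 * Worked example (numbers are illustrative): for a 100 MiB region,
 * damon_rand(1, 10) picks a multiplier in [1, 9], so the left sub-region
 * gets 10-90 MiB (aligned down to a DAMON_MIN_REGION boundary) and the
 * right sub-region keeps the remainder.
 */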
/*
 * Split every target region into randomly-sized small regions
 *
 * This function splits every target region into random-sized small
 * regions if the current total number of the regions is equal to or
 * smaller than half of the user-specified maximum number of regions.
 * This is for maximizing the monitoring accuracy under dynamically
 * changing access patterns.  If a split was unnecessarily made, later
 * 'kdamond_merge_regions()' will revert it.
 */
static void kdamond_split_regions(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_regions = 0;
	static unsigned int last_nr_regions;
	int nr_subregions = 2;

	damon_for_each_target(t, ctx)
		nr_regions += damon_nr_regions(t);

	if (nr_regions > ctx->max_nr_regions / 2)
		return;

	/* Maybe the middle of the region has different access frequency */
	if (last_nr_regions == nr_regions &&
			nr_regions < ctx->max_nr_regions / 3)
		nr_subregions = 3;

	damon_for_each_target(t, ctx)
		damon_split_regions_of(ctx, t, nr_subregions);

	last_nr_regions = nr_regions;
}
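/*
 * Worked example (numbers are illustrative): with max_nr_regions == 1000,
 * splitting runs only while the context has at most 500 regions.  If two
 * consecutive aggregation intervals end with the same region count below
 * 333 (max_nr_regions / 3), regions are split in three instead of two to
 * probe their middles.
 */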
/*
 * Check whether it is time to check and apply the target monitoring regions
 *
 * Returns true if it is.
 */
static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
{
	return damon_check_reset_time_interval(&ctx->last_primitive_update,
			ctx->primitive_update_interval);
}
/*
 * Check whether current monitoring should be stopped
 *
 * The monitoring is stopped when either the user requested to stop, or
 * all monitoring targets are invalid.
 *
 * Returns true if the current monitoring needs to stop.
 */
static bool kdamond_need_stop(struct damon_ctx *ctx)
{
	struct damon_target *t;
	bool stop;

	mutex_lock(&ctx->kdamond_lock);
	stop = ctx->kdamond_stop;
	mutex_unlock(&ctx->kdamond_lock);
	if (stop)
		return true;

	if (!ctx->primitive.target_valid)
		return false;

	damon_for_each_target(t, ctx) {
		if (ctx->primitive.target_valid(t))
			return false;
	}

	return true;
}
static void set_kdamond_stop(struct damon_ctx *ctx)
{
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond_stop = true;
	mutex_unlock(&ctx->kdamond_lock);
}
/*
 * The monitoring daemon that runs as a kernel thread
 */
static int kdamond_fn(void *data)
{
	struct damon_ctx *ctx = (struct damon_ctx *)data;
	struct damon_target *t;
	struct damon_region *r, *next;
	unsigned int max_nr_accesses = 0;
	unsigned long sz_limit = 0;

	pr_debug("kdamond (%d) starts\n", current->pid);

	if (ctx->primitive.init)
		ctx->primitive.init(ctx);
	if (ctx->callback.before_start && ctx->callback.before_start(ctx))
		set_kdamond_stop(ctx);

	sz_limit = damon_region_sz_limit(ctx);

	while (!kdamond_need_stop(ctx)) {
		if (ctx->primitive.prepare_access_checks)
			ctx->primitive.prepare_access_checks(ctx);
		if (ctx->callback.after_sampling &&
				ctx->callback.after_sampling(ctx))
			set_kdamond_stop(ctx);

		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);

		if (ctx->primitive.check_accesses)
			max_nr_accesses = ctx->primitive.check_accesses(ctx);

		if (kdamond_aggregate_interval_passed(ctx)) {
			kdamond_merge_regions(ctx,
					max_nr_accesses / 10,
					sz_limit);
			if (ctx->callback.after_aggregation &&
					ctx->callback.after_aggregation(ctx))
				set_kdamond_stop(ctx);
			kdamond_reset_aggregated(ctx);
			kdamond_split_regions(ctx);
			if (ctx->primitive.reset_aggregated)
				ctx->primitive.reset_aggregated(ctx);
		}

		if (kdamond_need_update_primitive(ctx)) {
			if (ctx->primitive.update)
				ctx->primitive.update(ctx);
			sz_limit = damon_region_sz_limit(ctx);
		}
	}
	damon_for_each_target(t, ctx) {
		damon_for_each_region_safe(r, next, t)
			damon_destroy_region(r, t);
	}

	if (ctx->callback.before_terminate &&
			ctx->callback.before_terminate(ctx))
		set_kdamond_stop(ctx);
	if (ctx->primitive.cleanup)
		ctx->primitive.cleanup(ctx);

	pr_debug("kdamond (%d) finishes\n", current->pid);
	mutex_lock(&ctx->kdamond_lock);
	ctx->kdamond = NULL;
	mutex_unlock(&ctx->kdamond_lock);

	mutex_lock(&damon_lock);
	nr_running_ctxs--;
	mutex_unlock(&damon_lock);

	return 0;
}
#include "core-test.h"