+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Data Access Monitor Unit Tests
- *
- * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#ifdef CONFIG_DAMON_KUNIT_TEST
-
-#ifndef _DAMON_CORE_TEST_H
-#define _DAMON_CORE_TEST_H
-
-#include <kunit/test.h>
-
-static void damon_test_regions(struct kunit *test)
-{
- struct damon_region *r;
- struct damon_target *t;
-
- r = damon_new_region(1, 2);
- KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
- KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
- KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
-
- t = damon_new_target();
- KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
-
- damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
-
- damon_destroy_region(r, t);
- KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
-
- damon_free_target(t);
-}
-
-static unsigned int nr_damon_targets(struct damon_ctx *ctx)
-{
- struct damon_target *t;
- unsigned int nr_targets = 0;
-
- damon_for_each_target(t, ctx)
- nr_targets++;
-
- return nr_targets;
-}
-
-static void damon_test_target(struct kunit *test)
-{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_target *t;
-
- t = damon_new_target();
- KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
-
- damon_add_target(c, t);
- KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
-
- damon_destroy_target(t);
- KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
-
- damon_destroy_ctx(c);
-}
-
-/*
- * Test kdamond_reset_aggregated()
- *
- * DAMON checks access to each region and aggregates this information as the
- * access frequency of each region. In detail, it increases '->nr_accesses' of
- * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes
- * the aggregated information ('->nr_accesses' of each regions) to the result
- * buffer. As a result of the flushing, the '->nr_accesses' of regions are
- * initialized to zero.
- */
-static void damon_test_aggregate(struct kunit *test)
-{
- struct damon_ctx *ctx = damon_new_ctx();
- unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
- unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
- unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
- struct damon_target *t;
- struct damon_region *r;
- int it, ir;
-
- for (it = 0; it < 3; it++) {
- t = damon_new_target();
- damon_add_target(ctx, t);
- }
-
- it = 0;
- damon_for_each_target(t, ctx) {
- for (ir = 0; ir < 3; ir++) {
- r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
- r->nr_accesses = accesses[it][ir];
- r->nr_accesses_bp = accesses[it][ir] * 10000;
- damon_add_region(r, t);
- }
- it++;
- }
- kdamond_reset_aggregated(ctx);
- it = 0;
- damon_for_each_target(t, ctx) {
- ir = 0;
- /* '->nr_accesses' should be zeroed */
- damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
- ir++;
- }
- /* regions should be preserved */
- KUNIT_EXPECT_EQ(test, 3, ir);
- it++;
- }
- /* targets also should be preserved */
- KUNIT_EXPECT_EQ(test, 3, it);
-
- damon_destroy_ctx(ctx);
-}
-
-static void damon_test_split_at(struct kunit *test)
-{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_target *t;
- struct damon_region *r, *r_new;
-
- t = damon_new_target();
- r = damon_new_region(0, 100);
- r->nr_accesses_bp = 420000;
- r->nr_accesses = 42;
- r->last_nr_accesses = 15;
- damon_add_region(r, t);
- damon_split_region_at(t, r, 25);
- KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
- KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
-
- r_new = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
- KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);
-
- KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
- KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
- KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
-
- damon_free_target(t);
- damon_destroy_ctx(c);
-}
-
-static void damon_test_merge_two(struct kunit *test)
-{
- struct damon_target *t;
- struct damon_region *r, *r2, *r3;
- int i;
-
- t = damon_new_target();
- r = damon_new_region(0, 100);
- r->nr_accesses = 10;
- r->nr_accesses_bp = 100000;
- damon_add_region(r, t);
- r2 = damon_new_region(100, 300);
- r2->nr_accesses = 20;
- r2->nr_accesses_bp = 200000;
- damon_add_region(r2, t);
-
- damon_merge_two_regions(t, r, r2);
- KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
- KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
- KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
-
- i = 0;
- damon_for_each_region(r3, t) {
- KUNIT_EXPECT_PTR_EQ(test, r, r3);
- i++;
- }
- KUNIT_EXPECT_EQ(test, i, 1);
-
- damon_free_target(t);
-}
-
-static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
-{
- struct damon_region *r;
- unsigned int i = 0;
-
- damon_for_each_region(r, t) {
- if (i++ == idx)
- return r;
- }
-
- return NULL;
-}
-
-static void damon_test_merge_regions_of(struct kunit *test)
-{
- struct damon_target *t;
- struct damon_region *r;
- unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
- unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
- unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};
-
- unsigned long saddrs[] = {0, 114, 130, 156, 170};
- unsigned long eaddrs[] = {112, 130, 156, 170, 230};
- int i;
-
- t = damon_new_target();
- for (i = 0; i < ARRAY_SIZE(sa); i++) {
- r = damon_new_region(sa[i], ea[i]);
- r->nr_accesses = nrs[i];
- r->nr_accesses_bp = nrs[i] * 10000;
- damon_add_region(r, t);
- }
-
- damon_merge_regions_of(t, 9, 9999);
- /* 0-112, 114-130, 130-156, 156-170 */
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
- for (i = 0; i < 5; i++) {
- r = __nth_region_of(t, i);
- KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
- KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
- }
- damon_free_target(t);
-}
-
-static void damon_test_split_regions_of(struct kunit *test)
-{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_target *t;
- struct damon_region *r;
-
- t = damon_new_target();
- r = damon_new_region(0, 22);
- damon_add_region(r, t);
- damon_split_regions_of(t, 2);
- KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
- damon_free_target(t);
-
- t = damon_new_target();
- r = damon_new_region(0, 220);
- damon_add_region(r, t);
- damon_split_regions_of(t, 4);
- KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
- damon_free_target(t);
- damon_destroy_ctx(c);
-}
-
-static void damon_test_ops_registration(struct kunit *test)
-{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
- bool need_cleanup = false;
-
- /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
- if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
- bak.id = DAMON_OPS_VADDR;
- KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
- need_cleanup = true;
- }
-
- /* DAMON_OPS_VADDR is ensured to be registered */
- KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
-
- /* Double-registration is prohibited */
- KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
-
- /* Unknown ops id cannot be registered */
- KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);
-
- /* Registration should success after unregistration */
- mutex_lock(&damon_ops_lock);
- bak = damon_registered_ops[DAMON_OPS_VADDR];
- damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
- mutex_unlock(&damon_ops_lock);
-
- ops.id = DAMON_OPS_VADDR;
- KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);
-
- mutex_lock(&damon_ops_lock);
- damon_registered_ops[DAMON_OPS_VADDR] = bak;
- mutex_unlock(&damon_ops_lock);
-
- /* Check double-registration failure again */
- KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
-
- damon_destroy_ctx(c);
-
- if (need_cleanup) {
- mutex_lock(&damon_ops_lock);
- damon_registered_ops[DAMON_OPS_VADDR] =
- (struct damon_operations){};
- mutex_unlock(&damon_ops_lock);
- }
-}
-
-static void damon_test_set_regions(struct kunit *test)
-{
- struct damon_target *t = damon_new_target();
- struct damon_region *r1 = damon_new_region(4, 16);
- struct damon_region *r2 = damon_new_region(24, 32);
- struct damon_addr_range range = {.start = 8, .end = 28};
- unsigned long expects[] = {8, 16, 16, 24, 24, 28};
- int expect_idx = 0;
- struct damon_region *r;
-
- damon_add_region(r1, t);
- damon_add_region(r2, t);
- damon_set_regions(t, &range, 1);
-
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
- damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
- KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
- }
- damon_destroy_target(t);
-}
-
-static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
-{
- struct damon_attrs attrs = {
- .sample_interval = 10,
- .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
- };
-
- KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
-}
-
-static void damon_test_update_monitoring_result(struct kunit *test)
-{
- struct damon_attrs old_attrs = {
- .sample_interval = 10, .aggr_interval = 1000,};
- struct damon_attrs new_attrs;
- struct damon_region *r = damon_new_region(3, 7);
-
- r->nr_accesses = 15;
- r->nr_accesses_bp = 150000;
- r->age = 20;
-
- new_attrs = (struct damon_attrs){
- .sample_interval = 100, .aggr_interval = 10000,};
- damon_update_monitoring_result(r, &old_attrs, &new_attrs);
- KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
- KUNIT_EXPECT_EQ(test, r->age, 2);
-
- new_attrs = (struct damon_attrs){
- .sample_interval = 1, .aggr_interval = 1000};
- damon_update_monitoring_result(r, &old_attrs, &new_attrs);
- KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
- KUNIT_EXPECT_EQ(test, r->age, 2);
-
- new_attrs = (struct damon_attrs){
- .sample_interval = 1, .aggr_interval = 100};
- damon_update_monitoring_result(r, &old_attrs, &new_attrs);
- KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
- KUNIT_EXPECT_EQ(test, r->age, 20);
-
- damon_free_region(r);
-}
-
-static void damon_test_set_attrs(struct kunit *test)
-{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_attrs valid_attrs = {
- .min_nr_regions = 10, .max_nr_regions = 1000,
- .sample_interval = 5000, .aggr_interval = 100000,};
- struct damon_attrs invalid_attrs;
-
- KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
-
- invalid_attrs = valid_attrs;
- invalid_attrs.min_nr_regions = 1;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
-
- invalid_attrs = valid_attrs;
- invalid_attrs.max_nr_regions = 9;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
-
- invalid_attrs = valid_attrs;
- invalid_attrs.aggr_interval = 4999;
- KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
-
- damon_destroy_ctx(c);
-}
-
-static void damon_test_moving_sum(struct kunit *test)
-{
- unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
- unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
- unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
- 45000, 40000, 35000, 30000};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(new_values); i++) {
- mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
- new_values[i]);
- KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
- }
-}
-
-static void damos_test_new_filter(struct kunit *test)
-{
- struct damos_filter *filter;
-
- filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
- KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
- KUNIT_EXPECT_EQ(test, filter->matching, true);
- KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
- KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
- damos_destroy_filter(filter);
-}
-
-static void damos_test_filter_out(struct kunit *test)
-{
- struct damon_target *t;
- struct damon_region *r, *r2;
- struct damos_filter *f;
-
- f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
- f->addr_range = (struct damon_addr_range){
- .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};
-
- t = damon_new_target();
- r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
- damon_add_region(r, t);
-
- /* region in the range */
- KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
-
- /* region before the range */
- r->ar.start = DAMON_MIN_REGION * 1;
- r->ar.end = DAMON_MIN_REGION * 2;
- KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
-
- /* region after the range */
- r->ar.start = DAMON_MIN_REGION * 6;
- r->ar.end = DAMON_MIN_REGION * 8;
- KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
-
- /* region started before the range */
- r->ar.start = DAMON_MIN_REGION * 1;
- r->ar.end = DAMON_MIN_REGION * 4;
- KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
- /* filter should have split the region */
- KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
- KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
- r2 = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
- damon_destroy_region(r2, t);
-
- /* region started in the range */
- r->ar.start = DAMON_MIN_REGION * 2;
- r->ar.end = DAMON_MIN_REGION * 8;
- KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
- /* filter should have split the region */
- KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
- KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
- r2 = damon_next_region(r);
- KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
- KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
- damon_destroy_region(r2, t);
-
- damon_free_target(t);
- damos_free_filter(f);
-}
-
-static void damon_test_feed_loop_next_input(struct kunit *test)
-{
- unsigned long last_input = 900000, current_score = 200;
-
- /*
- * If current score is lower than the goal, which is always 10,000
- * (read the comment on damon_feed_loop_next_input()'s comment), next
- * input should be higher than the last input.
- */
- KUNIT_EXPECT_GT(test,
- damon_feed_loop_next_input(last_input, current_score),
- last_input);
-
- /*
- * If current score is higher than the goal, next input should be lower
- * than the last input.
- */
- current_score = 250000000;
- KUNIT_EXPECT_LT(test,
- damon_feed_loop_next_input(last_input, current_score),
- last_input);
-
- /*
- * The next input depends on the distance between the current score and
- * the goal
- */
- KUNIT_EXPECT_GT(test,
- damon_feed_loop_next_input(last_input, 200),
- damon_feed_loop_next_input(last_input, 2000));
-}
-
-static struct kunit_case damon_test_cases[] = {
- KUNIT_CASE(damon_test_target),
- KUNIT_CASE(damon_test_regions),
- KUNIT_CASE(damon_test_aggregate),
- KUNIT_CASE(damon_test_split_at),
- KUNIT_CASE(damon_test_merge_two),
- KUNIT_CASE(damon_test_merge_regions_of),
- KUNIT_CASE(damon_test_split_regions_of),
- KUNIT_CASE(damon_test_ops_registration),
- KUNIT_CASE(damon_test_set_regions),
- KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
- KUNIT_CASE(damon_test_update_monitoring_result),
- KUNIT_CASE(damon_test_set_attrs),
- KUNIT_CASE(damon_test_moving_sum),
- KUNIT_CASE(damos_test_new_filter),
- KUNIT_CASE(damos_test_filter_out),
- KUNIT_CASE(damon_test_feed_loop_next_input),
- {},
-};
-
-static struct kunit_suite damon_test_suite = {
- .name = "damon",
- .test_cases = damon_test_cases,
-};
-kunit_test_suite(damon_test_suite);
-
-#endif /* _DAMON_CORE_TEST_H */
-
-#endif /* CONFIG_DAMON_KUNIT_TEST */
subsys_initcall(damon_init);
-#include "core-test.h"
+#include "tests/core-kunit.h"
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DAMON Debugfs Interface Unit Tests
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST
-
-#ifndef _DAMON_DBGFS_TEST_H
-#define _DAMON_DBGFS_TEST_H
-
-#include <kunit/test.h>
-
-static void damon_dbgfs_test_str_to_ints(struct kunit *test)
-{
- char *question;
- int *answers;
- int expected[] = {12, 35, 46};
- ssize_t nr_integers = 0, i;
-
- question = "123";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
- KUNIT_EXPECT_EQ(test, 123, answers[0]);
- kfree(answers);
-
- question = "123abc";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
- KUNIT_EXPECT_EQ(test, 123, answers[0]);
- kfree(answers);
-
- question = "a123";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
- kfree(answers);
-
- question = "12 35";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
- for (i = 0; i < nr_integers; i++)
- KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
- kfree(answers);
-
- question = "12 35 46";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
- for (i = 0; i < nr_integers; i++)
- KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
- kfree(answers);
-
- question = "12 35 abc 46";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
- for (i = 0; i < 2; i++)
- KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
- kfree(answers);
-
- question = "";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
- kfree(answers);
-
- question = "\n";
- answers = str_to_ints(question, strlen(question), &nr_integers);
- KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
- kfree(answers);
-}
-
-static void damon_dbgfs_test_set_targets(struct kunit *test)
-{
- struct damon_ctx *ctx = dbgfs_new_ctx();
- char buf[64];
-
- if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
- dbgfs_destroy_ctx(ctx);
- kunit_skip(test, "PADDR not registered");
- }
-
- /* Make DAMON consider target has no pid */
- damon_select_ops(ctx, DAMON_OPS_PADDR);
-
- dbgfs_set_targets(ctx, 0, NULL);
- sprint_target_ids(ctx, buf, 64);
- KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
-
- dbgfs_set_targets(ctx, 1, NULL);
- sprint_target_ids(ctx, buf, 64);
- KUNIT_EXPECT_STREQ(test, (char *)buf, "42\n");
-
- dbgfs_set_targets(ctx, 0, NULL);
- sprint_target_ids(ctx, buf, 64);
- KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
-
- dbgfs_destroy_ctx(ctx);
-}
-
-static void damon_dbgfs_test_set_init_regions(struct kunit *test)
-{
- struct damon_ctx *ctx = damon_new_ctx();
- /* Each line represents one region in ``<target idx> <start> <end>`` */
- char * const valid_inputs[] = {"1 10 20\n 1 20 30\n1 35 45",
- "1 10 20\n",
- "1 10 20\n0 39 59\n0 70 134\n 1 20 25\n",
- ""};
- /* Reading the file again will show sorted, clean output */
- char * const valid_expects[] = {"1 10 20\n1 20 30\n1 35 45\n",
- "1 10 20\n",
- "0 39 59\n0 70 134\n1 10 20\n1 20 25\n",
- ""};
- char * const invalid_inputs[] = {"3 10 20\n", /* target not exists */
- "1 10 20\n 1 14 26\n", /* regions overlap */
- "0 10 20\n1 30 40\n 0 5 8"}; /* not sorted by address */
- char *input, *expect;
- int i, rc;
- char buf[256];
-
- if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
- damon_destroy_ctx(ctx);
- kunit_skip(test, "PADDR not registered");
- }
-
- damon_select_ops(ctx, DAMON_OPS_PADDR);
-
- dbgfs_set_targets(ctx, 3, NULL);
-
- /* Put valid inputs and check the results */
- for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) {
- input = valid_inputs[i];
- expect = valid_expects[i];
-
- rc = set_init_regions(ctx, input, strnlen(input, 256));
- KUNIT_EXPECT_EQ(test, rc, 0);
-
- memset(buf, 0, 256);
- sprint_init_regions(ctx, buf, 256);
-
- KUNIT_EXPECT_STREQ(test, (char *)buf, expect);
- }
- /* Put invalid inputs and check the return error code */
- for (i = 0; i < ARRAY_SIZE(invalid_inputs); i++) {
- input = invalid_inputs[i];
- pr_info("input: %s\n", input);
- rc = set_init_regions(ctx, input, strnlen(input, 256));
- KUNIT_EXPECT_EQ(test, rc, -EINVAL);
-
- memset(buf, 0, 256);
- sprint_init_regions(ctx, buf, 256);
-
- KUNIT_EXPECT_STREQ(test, (char *)buf, "");
- }
-
- dbgfs_set_targets(ctx, 0, NULL);
- damon_destroy_ctx(ctx);
-}
-
-static struct kunit_case damon_test_cases[] = {
- KUNIT_CASE(damon_dbgfs_test_str_to_ints),
- KUNIT_CASE(damon_dbgfs_test_set_targets),
- KUNIT_CASE(damon_dbgfs_test_set_init_regions),
- {},
-};
-
-static struct kunit_suite damon_test_suite = {
- .name = "damon-dbgfs",
- .test_cases = damon_test_cases,
-};
-kunit_test_suite(damon_test_suite);
-
-#endif /* _DAMON_TEST_H */
-
-#endif /* CONFIG_DAMON_KUNIT_TEST */
module_init(damon_dbgfs_init);
-#include "dbgfs-test.h"
+#include "tests/dbgfs-kunit.h"
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Data Access Monitor Unit Tests
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#ifdef CONFIG_DAMON_SYSFS_KUNIT_TEST
-
-#ifndef _DAMON_SYSFS_TEST_H
-#define _DAMON_SYSFS_TEST_H
-
-#include <kunit/test.h>
-
-static unsigned int nr_damon_targets(struct damon_ctx *ctx)
-{
- struct damon_target *t;
- unsigned int nr_targets = 0;
-
- damon_for_each_target(t, ctx)
- nr_targets++;
-
- return nr_targets;
-}
-
-static int __damon_sysfs_test_get_any_pid(int min, int max)
-{
- struct pid *pid;
- int i;
-
- for (i = min; i <= max; i++) {
- pid = find_get_pid(i);
- if (pid) {
- put_pid(pid);
- return i;
- }
- }
- return -1;
-}
-
-static void damon_sysfs_test_add_targets(struct kunit *test)
-{
- struct damon_sysfs_targets *sysfs_targets;
- struct damon_sysfs_target *sysfs_target;
- struct damon_ctx *ctx;
-
- sysfs_targets = damon_sysfs_targets_alloc();
- sysfs_targets->nr = 1;
- sysfs_targets->targets_arr = kmalloc_array(1,
- sizeof(*sysfs_targets->targets_arr), GFP_KERNEL);
-
- sysfs_target = damon_sysfs_target_alloc();
- sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100);
- sysfs_target->regions = damon_sysfs_regions_alloc();
- sysfs_targets->targets_arr[0] = sysfs_target;
-
- ctx = damon_new_ctx();
-
- damon_sysfs_add_targets(ctx, sysfs_targets);
- KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
-
- sysfs_target->pid = __damon_sysfs_test_get_any_pid(
- sysfs_target->pid + 1, 200);
- damon_sysfs_add_targets(ctx, sysfs_targets);
- KUNIT_EXPECT_EQ(test, 2u, nr_damon_targets(ctx));
-
- damon_destroy_ctx(ctx);
- kfree(sysfs_targets->targets_arr);
- kfree(sysfs_targets);
- kfree(sysfs_target);
-}
-
-static struct kunit_case damon_sysfs_test_cases[] = {
- KUNIT_CASE(damon_sysfs_test_add_targets),
- {},
-};
-
-static struct kunit_suite damon_sysfs_test_suite = {
- .name = "damon-sysfs",
- .test_cases = damon_sysfs_test_cases,
-};
-kunit_test_suite(damon_sysfs_test_suite);
-
-#endif /* _DAMON_SYSFS_TEST_H */
-
-#endif /* CONFIG_DAMON_SYSFS_KUNIT_TEST */
}
subsys_initcall(damon_sysfs_init);
-#include "sysfs-test.h"
+#include "tests/sysfs-kunit.h"
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifdef CONFIG_DAMON_KUNIT_TEST
+
+#ifndef _DAMON_CORE_TEST_H
+#define _DAMON_CORE_TEST_H
+
+#include <kunit/test.h>
+
+static void damon_test_regions(struct kunit *test)
+{
+ struct damon_region *r;
+ struct damon_target *t;
+
+ r = damon_new_region(1, 2);
+ KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
+ KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
+ KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
+
+ t = damon_new_target();
+ KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
+
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
+
+ damon_destroy_region(r, t);
+ KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
+
+ damon_free_target(t);
+}
+
+static unsigned int nr_damon_targets(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ unsigned int nr_targets = 0;
+
+ damon_for_each_target(t, ctx)
+ nr_targets++;
+
+ return nr_targets;
+}
+
+static void damon_test_target(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+
+ t = damon_new_target();
+ KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
+
+ damon_add_target(c, t);
+ KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
+
+ damon_destroy_target(t);
+ KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
+
+ damon_destroy_ctx(c);
+}
+
+/*
+ * Test kdamond_reset_aggregated()
+ *
+ * DAMON checks access to each region and aggregates this information as the
+ * access frequency of each region. In detail, it increases '->nr_accesses' of
+ * regions that an access has confirmed. 'kdamond_reset_aggregated()' flushes
+ * the aggregated information ('->nr_accesses' of each regions) to the result
+ * buffer. As a result of the flushing, the '->nr_accesses' of regions are
+ * initialized to zero.
+ */
+static void damon_test_aggregate(struct kunit *test)
+{
+ struct damon_ctx *ctx = damon_new_ctx();
+ unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
+ unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
+ unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
+ struct damon_target *t;
+ struct damon_region *r;
+ int it, ir;
+
+ for (it = 0; it < 3; it++) {
+ t = damon_new_target();
+ damon_add_target(ctx, t);
+ }
+
+ it = 0;
+ damon_for_each_target(t, ctx) {
+ for (ir = 0; ir < 3; ir++) {
+ r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
+ r->nr_accesses = accesses[it][ir];
+ r->nr_accesses_bp = accesses[it][ir] * 10000;
+ damon_add_region(r, t);
+ }
+ it++;
+ }
+ kdamond_reset_aggregated(ctx);
+ it = 0;
+ damon_for_each_target(t, ctx) {
+ ir = 0;
+ /* '->nr_accesses' should be zeroed */
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
+ ir++;
+ }
+ /* regions should be preserved */
+ KUNIT_EXPECT_EQ(test, 3, ir);
+ it++;
+ }
+ /* targets also should be preserved */
+ KUNIT_EXPECT_EQ(test, 3, it);
+
+ damon_destroy_ctx(ctx);
+}
+
+static void damon_test_split_at(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+ struct damon_region *r, *r_new;
+
+ t = damon_new_target();
+ r = damon_new_region(0, 100);
+ r->nr_accesses_bp = 420000;
+ r->nr_accesses = 42;
+ r->last_nr_accesses = 15;
+ damon_add_region(r, t);
+ damon_split_region_at(t, r, 25);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
+
+ r_new = damon_next_region(r);
+ KUNIT_EXPECT_EQ(test, r_new->ar.start, 25ul);
+ KUNIT_EXPECT_EQ(test, r_new->ar.end, 100ul);
+
+ KUNIT_EXPECT_EQ(test, r->nr_accesses_bp, r_new->nr_accesses_bp);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, r_new->nr_accesses);
+ KUNIT_EXPECT_EQ(test, r->last_nr_accesses, r_new->last_nr_accesses);
+
+ damon_free_target(t);
+ damon_destroy_ctx(c);
+}
+
+static void damon_test_merge_two(struct kunit *test)
+{
+ struct damon_target *t;
+ struct damon_region *r, *r2, *r3;
+ int i;
+
+ t = damon_new_target();
+ r = damon_new_region(0, 100);
+ r->nr_accesses = 10;
+ r->nr_accesses_bp = 100000;
+ damon_add_region(r, t);
+ r2 = damon_new_region(100, 300);
+ r2->nr_accesses = 20;
+ r2->nr_accesses_bp = 200000;
+ damon_add_region(r2, t);
+
+ damon_merge_two_regions(t, r, r2);
+ KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
+ KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
+
+ i = 0;
+ damon_for_each_region(r3, t) {
+ KUNIT_EXPECT_PTR_EQ(test, r, r3);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 1);
+
+ damon_free_target(t);
+}
+
+static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
+{
+ struct damon_region *r;
+ unsigned int i = 0;
+
+ damon_for_each_region(r, t) {
+ if (i++ == idx)
+ return r;
+ }
+
+ return NULL;
+}
+
+static void damon_test_merge_regions_of(struct kunit *test)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
+ unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
+ unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};
+
+ unsigned long saddrs[] = {0, 114, 130, 156, 170};
+ unsigned long eaddrs[] = {112, 130, 156, 170, 230};
+ int i;
+
+ t = damon_new_target();
+ for (i = 0; i < ARRAY_SIZE(sa); i++) {
+ r = damon_new_region(sa[i], ea[i]);
+ r->nr_accesses = nrs[i];
+ r->nr_accesses_bp = nrs[i] * 10000;
+ damon_add_region(r, t);
+ }
+
+ damon_merge_regions_of(t, 9, 9999);
+ /* 0-112, 114-130, 130-156, 156-170 */
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+ for (i = 0; i < 5; i++) {
+ r = __nth_region_of(t, i);
+ KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
+ }
+ damon_free_target(t);
+}
+
+static void damon_test_split_regions_of(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_target *t;
+ struct damon_region *r;
+
+ t = damon_new_target();
+ r = damon_new_region(0, 22);
+ damon_add_region(r, t);
+ damon_split_regions_of(t, 2);
+ KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
+ damon_free_target(t);
+
+ t = damon_new_target();
+ r = damon_new_region(0, 220);
+ damon_add_region(r, t);
+ damon_split_regions_of(t, 4);
+ KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
+ damon_free_target(t);
+ damon_destroy_ctx(c);
+}
+
+static void damon_test_ops_registration(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak;
+ bool need_cleanup = false;
+
+ /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */
+ if (!damon_is_registered_ops(DAMON_OPS_VADDR)) {
+ bak.id = DAMON_OPS_VADDR;
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&bak), 0);
+ need_cleanup = true;
+ }
+
+ /* DAMON_OPS_VADDR is ensured to be registered */
+ KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
+
+ /* Double-registration is prohibited */
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
+
+ /* Unknown ops id cannot be registered */
+ KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);
+
+ /* Registration should success after unregistration */
+ mutex_lock(&damon_ops_lock);
+ bak = damon_registered_ops[DAMON_OPS_VADDR];
+ damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
+ mutex_unlock(&damon_ops_lock);
+
+ ops.id = DAMON_OPS_VADDR;
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);
+
+ mutex_lock(&damon_ops_lock);
+ damon_registered_ops[DAMON_OPS_VADDR] = bak;
+ mutex_unlock(&damon_ops_lock);
+
+ /* Check double-registration failure again */
+ KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
+
+ damon_destroy_ctx(c);
+
+ if (need_cleanup) {
+ mutex_lock(&damon_ops_lock);
+ damon_registered_ops[DAMON_OPS_VADDR] =
+ (struct damon_operations){};
+ mutex_unlock(&damon_ops_lock);
+ }
+}
+
+static void damon_test_set_regions(struct kunit *test)
+{
+ struct damon_target *t = damon_new_target();
+ struct damon_region *r1 = damon_new_region(4, 16);
+ struct damon_region *r2 = damon_new_region(24, 32);
+ struct damon_addr_range range = {.start = 8, .end = 28};
+ unsigned long expects[] = {8, 16, 16, 24, 24, 28};
+ int expect_idx = 0;
+ struct damon_region *r;
+
+ damon_add_region(r1, t);
+ damon_add_region(r2, t);
+ damon_set_regions(t, &range, 1);
+
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
+ }
+ damon_destroy_target(t);
+}
+
+static void damon_test_nr_accesses_to_accesses_bp(struct kunit *test)
+{
+ struct damon_attrs attrs = {
+ .sample_interval = 10,
+ .aggr_interval = ((unsigned long)UINT_MAX + 1) * 10
+ };
+
+ KUNIT_EXPECT_EQ(test, damon_nr_accesses_to_accesses_bp(123, &attrs), 0);
+}
+
+static void damon_test_update_monitoring_result(struct kunit *test)
+{
+ struct damon_attrs old_attrs = {
+ .sample_interval = 10, .aggr_interval = 1000,};
+ struct damon_attrs new_attrs;
+ struct damon_region *r = damon_new_region(3, 7);
+
+ r->nr_accesses = 15;
+ r->nr_accesses_bp = 150000;
+ r->age = 20;
+
+ new_attrs = (struct damon_attrs){
+ .sample_interval = 100, .aggr_interval = 10000,};
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
+ KUNIT_EXPECT_EQ(test, r->age, 2);
+
+ new_attrs = (struct damon_attrs){
+ .sample_interval = 1, .aggr_interval = 1000};
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
+ KUNIT_EXPECT_EQ(test, r->age, 2);
+
+ new_attrs = (struct damon_attrs){
+ .sample_interval = 1, .aggr_interval = 100};
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
+ KUNIT_EXPECT_EQ(test, r->age, 20);
+
+ damon_free_region(r);
+}
+
+static void damon_test_set_attrs(struct kunit *test)
+{
+ struct damon_ctx *c = damon_new_ctx();
+ struct damon_attrs valid_attrs = {
+ .min_nr_regions = 10, .max_nr_regions = 1000,
+ .sample_interval = 5000, .aggr_interval = 100000,};
+ struct damon_attrs invalid_attrs;
+
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
+
+ invalid_attrs = valid_attrs;
+ invalid_attrs.min_nr_regions = 1;
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
+
+ invalid_attrs = valid_attrs;
+ invalid_attrs.max_nr_regions = 9;
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
+
+ invalid_attrs = valid_attrs;
+ invalid_attrs.aggr_interval = 4999;
+ KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
+
+ damon_destroy_ctx(c);
+}
+
+static void damon_test_moving_sum(struct kunit *test)
+{
+ unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
+ unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
+ unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
+ 45000, 40000, 35000, 30000};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(new_values); i++) {
+ mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
+ new_values[i]);
+ KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
+ }
+}
+
+static void damos_test_new_filter(struct kunit *test)
+{
+ struct damos_filter *filter;
+
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+ KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON);
+ KUNIT_EXPECT_EQ(test, filter->matching, true);
+ KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list);
+ KUNIT_EXPECT_PTR_EQ(test, filter->list.next, &filter->list);
+ damos_destroy_filter(filter);
+}
+
+static void damos_test_filter_out(struct kunit *test)
+{
+ struct damon_target *t;
+ struct damon_region *r, *r2;
+ struct damos_filter *f;
+
+ f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
+ f->addr_range = (struct damon_addr_range){
+ .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6};
+
+ t = damon_new_target();
+ r = damon_new_region(DAMON_MIN_REGION * 3, DAMON_MIN_REGION * 5);
+ damon_add_region(r, t);
+
+ /* region in the range */
+ KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
+
+ /* region before the range */
+ r->ar.start = DAMON_MIN_REGION * 1;
+ r->ar.end = DAMON_MIN_REGION * 2;
+ KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
+
+ /* region after the range */
+ r->ar.start = DAMON_MIN_REGION * 6;
+ r->ar.end = DAMON_MIN_REGION * 8;
+ KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1);
+
+ /* region started before the range */
+ r->ar.start = DAMON_MIN_REGION * 1;
+ r->ar.end = DAMON_MIN_REGION * 4;
+ KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f));
+ /* filter should have split the region */
+ KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1);
+ KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
+ r2 = damon_next_region(r);
+ KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 2);
+ KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 4);
+ damon_destroy_region(r2, t);
+
+ /* region started in the range */
+ r->ar.start = DAMON_MIN_REGION * 2;
+ r->ar.end = DAMON_MIN_REGION * 8;
+ KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f));
+ /* filter should have split the region */
+ KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2);
+ KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 2);
+ r2 = damon_next_region(r);
+ KUNIT_EXPECT_EQ(test, r2->ar.start, DAMON_MIN_REGION * 6);
+ KUNIT_EXPECT_EQ(test, r2->ar.end, DAMON_MIN_REGION * 8);
+ damon_destroy_region(r2, t);
+
+ damon_free_target(t);
+ damos_free_filter(f);
+}
+
+static void damon_test_feed_loop_next_input(struct kunit *test)
+{
+ unsigned long last_input = 900000, current_score = 200;
+
+ /*
+ * If current score is lower than the goal, which is always 10,000
+ * (read the comment on damon_feed_loop_next_input()'s comment), next
+ * input should be higher than the last input.
+ */
+ KUNIT_EXPECT_GT(test,
+ damon_feed_loop_next_input(last_input, current_score),
+ last_input);
+
+ /*
+ * If current score is higher than the goal, next input should be lower
+ * than the last input.
+ */
+ current_score = 250000000;
+ KUNIT_EXPECT_LT(test,
+ damon_feed_loop_next_input(last_input, current_score),
+ last_input);
+
+ /*
+ * The next input depends on the distance between the current score and
+ * the goal
+ */
+ KUNIT_EXPECT_GT(test,
+ damon_feed_loop_next_input(last_input, 200),
+ damon_feed_loop_next_input(last_input, 2000));
+}
+
+static struct kunit_case damon_test_cases[] = {
+ KUNIT_CASE(damon_test_target),
+ KUNIT_CASE(damon_test_regions),
+ KUNIT_CASE(damon_test_aggregate),
+ KUNIT_CASE(damon_test_split_at),
+ KUNIT_CASE(damon_test_merge_two),
+ KUNIT_CASE(damon_test_merge_regions_of),
+ KUNIT_CASE(damon_test_split_regions_of),
+ KUNIT_CASE(damon_test_ops_registration),
+ KUNIT_CASE(damon_test_set_regions),
+ KUNIT_CASE(damon_test_nr_accesses_to_accesses_bp),
+ KUNIT_CASE(damon_test_update_monitoring_result),
+ KUNIT_CASE(damon_test_set_attrs),
+ KUNIT_CASE(damon_test_moving_sum),
+ KUNIT_CASE(damos_test_new_filter),
+ KUNIT_CASE(damos_test_filter_out),
+ KUNIT_CASE(damon_test_feed_loop_next_input),
+ {},
+};
+
+static struct kunit_suite damon_test_suite = {
+ .name = "damon",
+ .test_cases = damon_test_cases,
+};
+kunit_test_suite(damon_test_suite);
+
+#endif /* _DAMON_CORE_TEST_H */
+
+#endif /* CONFIG_DAMON_KUNIT_TEST */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON Debugfs Interface Unit Tests
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST
+
+#ifndef _DAMON_DBGFS_TEST_H
+#define _DAMON_DBGFS_TEST_H
+
+#include <kunit/test.h>
+
+static void damon_dbgfs_test_str_to_ints(struct kunit *test)
+{
+ char *question;
+ int *answers;
+ int expected[] = {12, 35, 46};
+ ssize_t nr_integers = 0, i;
+
+ question = "123";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
+ KUNIT_EXPECT_EQ(test, 123, answers[0]);
+ kfree(answers);
+
+ question = "123abc";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
+ KUNIT_EXPECT_EQ(test, 123, answers[0]);
+ kfree(answers);
+
+ question = "a123";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
+ kfree(answers);
+
+ question = "12 35";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
+ for (i = 0; i < nr_integers; i++)
+ KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
+ kfree(answers);
+
+ question = "12 35 46";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
+ for (i = 0; i < nr_integers; i++)
+ KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
+ kfree(answers);
+
+ question = "12 35 abc 46";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
+ for (i = 0; i < 2; i++)
+ KUNIT_EXPECT_EQ(test, expected[i], answers[i]);
+ kfree(answers);
+
+ question = "";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
+ kfree(answers);
+
+ question = "\n";
+ answers = str_to_ints(question, strlen(question), &nr_integers);
+ KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
+ kfree(answers);
+}
+
+static void damon_dbgfs_test_set_targets(struct kunit *test)
+{
+ struct damon_ctx *ctx = dbgfs_new_ctx();
+ char buf[64];
+
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ dbgfs_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
+ /* Make DAMON consider target has no pid */
+ damon_select_ops(ctx, DAMON_OPS_PADDR);
+
+ dbgfs_set_targets(ctx, 0, NULL);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
+
+ dbgfs_set_targets(ctx, 1, NULL);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "42\n");
+
+ dbgfs_set_targets(ctx, 0, NULL);
+ sprint_target_ids(ctx, buf, 64);
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "\n");
+
+ dbgfs_destroy_ctx(ctx);
+}
+
+static void damon_dbgfs_test_set_init_regions(struct kunit *test)
+{
+ struct damon_ctx *ctx = damon_new_ctx();
+ /* Each line represents one region in ``<target idx> <start> <end>`` */
+ char * const valid_inputs[] = {"1 10 20\n 1 20 30\n1 35 45",
+ "1 10 20\n",
+ "1 10 20\n0 39 59\n0 70 134\n 1 20 25\n",
+ ""};
+ /* Reading the file again will show sorted, clean output */
+ char * const valid_expects[] = {"1 10 20\n1 20 30\n1 35 45\n",
+ "1 10 20\n",
+ "0 39 59\n0 70 134\n1 10 20\n1 20 25\n",
+ ""};
+ char * const invalid_inputs[] = {"3 10 20\n", /* target not exists */
+ "1 10 20\n 1 14 26\n", /* regions overlap */
+ "0 10 20\n1 30 40\n 0 5 8"}; /* not sorted by address */
+ char *input, *expect;
+ int i, rc;
+ char buf[256];
+
+ if (!damon_is_registered_ops(DAMON_OPS_PADDR)) {
+ damon_destroy_ctx(ctx);
+ kunit_skip(test, "PADDR not registered");
+ }
+
+ damon_select_ops(ctx, DAMON_OPS_PADDR);
+
+ dbgfs_set_targets(ctx, 3, NULL);
+
+ /* Put valid inputs and check the results */
+ for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) {
+ input = valid_inputs[i];
+ expect = valid_expects[i];
+
+ rc = set_init_regions(ctx, input, strnlen(input, 256));
+ KUNIT_EXPECT_EQ(test, rc, 0);
+
+ memset(buf, 0, 256);
+ sprint_init_regions(ctx, buf, 256);
+
+ KUNIT_EXPECT_STREQ(test, (char *)buf, expect);
+ }
+ /* Put invalid inputs and check the return error code */
+ for (i = 0; i < ARRAY_SIZE(invalid_inputs); i++) {
+ input = invalid_inputs[i];
+ pr_info("input: %s\n", input);
+ rc = set_init_regions(ctx, input, strnlen(input, 256));
+ KUNIT_EXPECT_EQ(test, rc, -EINVAL);
+
+ memset(buf, 0, 256);
+ sprint_init_regions(ctx, buf, 256);
+
+ KUNIT_EXPECT_STREQ(test, (char *)buf, "");
+ }
+
+ dbgfs_set_targets(ctx, 0, NULL);
+ damon_destroy_ctx(ctx);
+}
+
+static struct kunit_case damon_test_cases[] = {
+ KUNIT_CASE(damon_dbgfs_test_str_to_ints),
+ KUNIT_CASE(damon_dbgfs_test_set_targets),
+ KUNIT_CASE(damon_dbgfs_test_set_init_regions),
+ {},
+};
+
+static struct kunit_suite damon_test_suite = {
+ .name = "damon-dbgfs",
+ .test_cases = damon_test_cases,
+};
+kunit_test_suite(damon_test_suite);
+
+#endif /* _DAMON_DBGFS_TEST_H */
+
+#endif /* CONFIG_DAMON_DBGFS_KUNIT_TEST */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifdef CONFIG_DAMON_SYSFS_KUNIT_TEST
+
+#ifndef _DAMON_SYSFS_TEST_H
+#define _DAMON_SYSFS_TEST_H
+
+#include <kunit/test.h>
+
+static unsigned int nr_damon_targets(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ unsigned int nr_targets = 0;
+
+ damon_for_each_target(t, ctx)
+ nr_targets++;
+
+ return nr_targets;
+}
+
+static int __damon_sysfs_test_get_any_pid(int min, int max)
+{
+ struct pid *pid;
+ int i;
+
+ for (i = min; i <= max; i++) {
+ pid = find_get_pid(i);
+ if (pid) {
+ put_pid(pid);
+ return i;
+ }
+ }
+ return -1;
+}
+
+static void damon_sysfs_test_add_targets(struct kunit *test)
+{
+ struct damon_sysfs_targets *sysfs_targets;
+ struct damon_sysfs_target *sysfs_target;
+ struct damon_ctx *ctx;
+
+ sysfs_targets = damon_sysfs_targets_alloc();
+ sysfs_targets->nr = 1;
+ sysfs_targets->targets_arr = kmalloc_array(1,
+ sizeof(*sysfs_targets->targets_arr), GFP_KERNEL);
+
+ sysfs_target = damon_sysfs_target_alloc();
+ sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100);
+ sysfs_target->regions = damon_sysfs_regions_alloc();
+ sysfs_targets->targets_arr[0] = sysfs_target;
+
+ ctx = damon_new_ctx();
+
+ damon_sysfs_add_targets(ctx, sysfs_targets);
+ KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
+
+ sysfs_target->pid = __damon_sysfs_test_get_any_pid(
+ sysfs_target->pid + 1, 200);
+ damon_sysfs_add_targets(ctx, sysfs_targets);
+ KUNIT_EXPECT_EQ(test, 2u, nr_damon_targets(ctx));
+
+ damon_destroy_ctx(ctx);
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ /* 'regions' was allocated separately above; free it to avoid a leak */
+ kfree(sysfs_target->regions);
+ kfree(sysfs_target);
+}
+
+static struct kunit_case damon_sysfs_test_cases[] = {
+ KUNIT_CASE(damon_sysfs_test_add_targets),
+ {},
+};
+
+static struct kunit_suite damon_sysfs_test_suite = {
+ .name = "damon-sysfs",
+ .test_cases = damon_sysfs_test_cases,
+};
+kunit_test_suite(damon_sysfs_test_suite);
+
+#endif /* _DAMON_SYSFS_TEST_H */
+
+#endif /* CONFIG_DAMON_SYSFS_KUNIT_TEST */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
+
+#ifndef _DAMON_VADDR_TEST_H
+#define _DAMON_VADDR_TEST_H
+
+#include <kunit/test.h>
+
+static int __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas,
+ ssize_t nr_vmas)
+{
+ int i, ret = -ENOMEM;
+ MA_STATE(mas, mt, 0, 0);
+
+ if (!nr_vmas)
+ return 0;
+
+ mas_lock(&mas);
+ for (i = 0; i < nr_vmas; i++) {
+ mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);
+ if (mas_store_gfp(&mas, &vmas[i], GFP_KERNEL))
+ goto failed;
+ }
+
+ ret = 0;
+failed:
+ mas_unlock(&mas);
+ return ret;
+}
+
+/*
+ * Test __damon_va_three_regions() function
+ *
+ * In case of virtual memory address spaces monitoring, DAMON converts the
+ * complex and dynamic memory mappings of each target task to three
+ * discontiguous regions which cover every mapped areas. However, the three
+ * regions should not include the two biggest unmapped areas in the original
+ * mapping, because the two biggest areas are normally the areas between 1)
+ * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack.
+ * Because these two unmapped areas are very huge but obviously never accessed,
+ * covering the region is just a waste.
+ *
+ * '__damon_va_three_regions() receives an address space of a process. It
+ * first identifies the start of mappings, end of mappings, and the two biggest
+ * unmapped areas. After that, based on the information, it constructs the
+ * three regions and returns. For more detail, refer to the comment of
+ * 'damon_init_regions_of()' function definition in 'mm/damon.c' file.
+ *
+ * For example, suppose virtual address ranges of 10-20, 20-25, 200-210,
+ * 210-220, 300-305, and 307-330 (Other comments represent this mappings in
+ * more short form: 10-20-25, 200-210-220, 300-305, 307-330) of a process are
+ * mapped. To cover every mappings, the three regions should start with 10,
+ * and end with 305. The process also has three unmapped areas, 25-200,
+ * 220-300, and 305-307. Among those, 25-200 and 220-300 are the biggest two
+ * unmapped areas, and thus it should be converted to three regions of 10-25,
+ * 200-220, and 300-330.
+ */
+static void damon_test_three_regions_in_vmas(struct kunit *test)
+{
+ static struct mm_struct mm;
+ struct damon_addr_range regions[3] = {0,};
+ /* 10-20-25, 200-210-220, 300-305, 307-330 */
+ struct vm_area_struct vmas[] = {
+ (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},
+ (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},
+ (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},
+ (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},
+ (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},
+ (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
+ };
+
+ mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
+ if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)))
+ kunit_skip(test, "Failed to create VMA tree");
+
+ __damon_va_three_regions(&mm, regions);
+
+ KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);
+ KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);
+ KUNIT_EXPECT_EQ(test, 200ul, regions[1].start);
+ KUNIT_EXPECT_EQ(test, 220ul, regions[1].end);
+ KUNIT_EXPECT_EQ(test, 300ul, regions[2].start);
+ KUNIT_EXPECT_EQ(test, 330ul, regions[2].end);
+}
+
+static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
+{
+ struct damon_region *r;
+ unsigned int i = 0;
+
+ damon_for_each_region(r, t) {
+ if (i++ == idx)
+ return r;
+ }
+
+ return NULL;
+}
+
+/*
+ * Test 'damon_set_regions()'
+ *
+ * test kunit object
+ * regions an array containing start/end addresses of current
+ * monitoring target regions
+ * nr_regions the number of the addresses in 'regions'
+ * three_regions The three regions that need to be applied now
+ * expected start/end addresses of monitoring target regions that
+ * 'three_regions' are applied
+ * nr_expected the number of addresses in 'expected'
+ *
+ * The memory mapping of the target processes changes dynamically. To follow
+ * the change, DAMON periodically reads the mappings, simplifies it to the
+ * three regions, and updates the monitoring target regions to fit in the three
+ * regions. The update of current target regions is the role of
+ * 'damon_set_regions()'.
+ *
+ * This test passes the given target regions and the new three regions that
+ * need to be applied to the function and check whether it updates the regions
+ * as expected.
+ */
+static void damon_do_test_apply_three_regions(struct kunit *test,
+ unsigned long *regions, int nr_regions,
+ struct damon_addr_range *three_regions,
+ unsigned long *expected, int nr_expected)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ int i;
+
+ t = damon_new_target();
+ for (i = 0; i < nr_regions / 2; i++) {
+ r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
+ damon_add_region(r, t);
+ }
+
+ damon_set_regions(t, three_regions, 3);
+
+ for (i = 0; i < nr_expected / 2; i++) {
+ r = __nth_region_of(t, i);
+ KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
+ }
+
+ damon_destroy_target(t);
+}
+
+/*
+ * This function test most common case where the three big regions are only
+ * slightly changed. Target regions should adjust their boundary (10-20-30,
+ * 50-55, 70-80, 90-100) to fit with the new big regions or remove target
+ * regions (57-79) that now out of the three regions.
+ */
+static void damon_test_apply_three_regions1(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-27, 45-55, 73-104 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 27},
+ (struct damon_addr_range){.start = 45, .end = 55},
+ (struct damon_addr_range){.start = 73, .end = 104} };
+ /* 5-20-27, 45-55, 73-80-90-104 */
+ unsigned long expected[] = {5, 20, 20, 27, 45, 55,
+ 73, 80, 80, 90, 90, 104};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+/*
+ * Test slightly bigger change. Similar to above, but the second big region
+ * now require two target regions (50-55, 57-59) to be removed.
+ */
+static void damon_test_apply_three_regions2(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-27, 56-57, 65-104 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 27},
+ (struct damon_addr_range){.start = 56, .end = 57},
+ (struct damon_addr_range){.start = 65, .end = 104} };
+ /* 5-20-27, 56-57, 65-80-90-104 */
+ unsigned long expected[] = {5, 20, 20, 27, 56, 57,
+ 65, 80, 80, 90, 90, 104};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+/*
+ * Test a big change. The second big region has totally freed and mapped to
+ * different area (50-59 -> 61-63). The target regions which were in the old
+ * second big region (50-55-57-59) should be removed and new target region
+ * covering the second big region (61-63) should be created.
+ */
+static void damon_test_apply_three_regions3(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-27, 61-63, 65-104 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 27},
+ (struct damon_addr_range){.start = 61, .end = 63},
+ (struct damon_addr_range){.start = 65, .end = 104} };
+ /* 5-20-27, 61-63, 65-80-90-104 */
+ unsigned long expected[] = {5, 20, 20, 27, 61, 63,
+ 65, 80, 80, 90, 90, 104};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+/*
+ * Test another big change. Both of the second and third big regions (50-59
+ * and 70-100) has totally freed and mapped to different area (30-32 and
+ * 65-68). The target regions which were in the old second and third big
+ * regions should now be removed and new target regions covering the new second
+ * and third big regions should be created.
+ */
+static void damon_test_apply_three_regions4(struct kunit *test)
+{
+ /* 10-20-30, 50-55-57-59, 70-80-90-100 */
+ unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
+ 70, 80, 80, 90, 90, 100};
+ /* 5-7, 30-32, 65-68 */
+ struct damon_addr_range new_three_regions[3] = {
+ (struct damon_addr_range){.start = 5, .end = 7},
+ (struct damon_addr_range){.start = 30, .end = 32},
+ (struct damon_addr_range){.start = 65, .end = 68} };
+ /* expect 5-7, 30-32, 65-68 */
+ unsigned long expected[] = {5, 7, 30, 32, 65, 68};
+
+ damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
+ new_three_regions, expected, ARRAY_SIZE(expected));
+}
+
+static void damon_test_split_evenly_fail(struct kunit *test,
+ unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+ struct damon_target *t = damon_new_target();
+ struct damon_region *r = damon_new_region(start, end);
+
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test,
+ damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, start);
+ KUNIT_EXPECT_EQ(test, r->ar.end, end);
+ }
+
+ damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+ unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+ struct damon_target *t = damon_new_target();
+ struct damon_region *r = damon_new_region(start, end);
+ unsigned long expected_width = (end - start) / nr_pieces;
+ unsigned long i = 0;
+
+ damon_add_region(r, t);
+ KUNIT_EXPECT_EQ(test,
+ damon_va_evenly_split_region(t, r, nr_pieces), 0);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
+
+ damon_for_each_region(r, t) {
+ if (i == nr_pieces - 1) {
+ KUNIT_EXPECT_EQ(test,
+ r->ar.start, start + i * expected_width);
+ KUNIT_EXPECT_EQ(test, r->ar.end, end);
+ break;
+ }
+ KUNIT_EXPECT_EQ(test,
+ r->ar.start, start + i++ * expected_width);
+ KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
+ }
+ damon_free_target(t);
+}
+
+static void damon_test_split_evenly(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+ -EINVAL);
+
+ damon_test_split_evenly_fail(test, 0, 100, 0);
+ damon_test_split_evenly_succ(test, 0, 100, 10);
+ damon_test_split_evenly_succ(test, 5, 59, 5);
+ damon_test_split_evenly_fail(test, 5, 6, 2);
+}
+
+static struct kunit_case damon_test_cases[] = {
+ KUNIT_CASE(damon_test_three_regions_in_vmas),
+ KUNIT_CASE(damon_test_apply_three_regions1),
+ KUNIT_CASE(damon_test_apply_three_regions2),
+ KUNIT_CASE(damon_test_apply_three_regions3),
+ KUNIT_CASE(damon_test_apply_three_regions4),
+ KUNIT_CASE(damon_test_split_evenly),
+ {},
+};
+
+static struct kunit_suite damon_test_suite = {
+ .name = "damon-operations",
+ .test_cases = damon_test_cases,
+};
+kunit_test_suite(damon_test_suite);
+
+#endif /* _DAMON_VADDR_TEST_H */
+
+#endif /* CONFIG_DAMON_VADDR_KUNIT_TEST */
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Data Access Monitor Unit Tests
- *
- * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
- *
- * Author: SeongJae Park <sj@kernel.org>
- */
-
-#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
-
-#ifndef _DAMON_VADDR_TEST_H
-#define _DAMON_VADDR_TEST_H
-
-#include <kunit/test.h>
-
-static int __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas,
- ssize_t nr_vmas)
-{
- int i, ret = -ENOMEM;
- MA_STATE(mas, mt, 0, 0);
-
- if (!nr_vmas)
- return 0;
-
- mas_lock(&mas);
- for (i = 0; i < nr_vmas; i++) {
- mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);
- if (mas_store_gfp(&mas, &vmas[i], GFP_KERNEL))
- goto failed;
- }
-
- ret = 0;
-failed:
- mas_unlock(&mas);
- return ret;
-}
-
-/*
- * Test __damon_va_three_regions() function
- *
- * In case of virtual memory address spaces monitoring, DAMON converts the
- * complex and dynamic memory mappings of each target task to three
- * discontiguous regions which cover every mapped areas. However, the three
- * regions should not include the two biggest unmapped areas in the original
- * mapping, because the two biggest areas are normally the areas between 1)
- * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack.
- * Because these two unmapped areas are very huge but obviously never accessed,
- * covering the region is just a waste.
- *
- * '__damon_va_three_regions() receives an address space of a process. It
- * first identifies the start of mappings, end of mappings, and the two biggest
- * unmapped areas. After that, based on the information, it constructs the
- * three regions and returns. For more detail, refer to the comment of
- * 'damon_init_regions_of()' function definition in 'mm/damon.c' file.
- *
- * For example, suppose virtual address ranges of 10-20, 20-25, 200-210,
- * 210-220, 300-305, and 307-330 (Other comments represent this mappings in
- * more short form: 10-20-25, 200-210-220, 300-305, 307-330) of a process are
- * mapped. To cover every mappings, the three regions should start with 10,
- * and end with 305. The process also has three unmapped areas, 25-200,
- * 220-300, and 305-307. Among those, 25-200 and 220-300 are the biggest two
- * unmapped areas, and thus it should be converted to three regions of 10-25,
- * 200-220, and 300-330.
- */
-static void damon_test_three_regions_in_vmas(struct kunit *test)
-{
- static struct mm_struct mm;
- struct damon_addr_range regions[3] = {0,};
- /* 10-20-25, 200-210-220, 300-305, 307-330 */
- struct vm_area_struct vmas[] = {
- (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},
- (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},
- (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},
- (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},
- (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},
- (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
- };
-
- mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
- if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)))
- kunit_skip(test, "Failed to create VMA tree");
-
- __damon_va_three_regions(&mm, regions);
-
- KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);
- KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);
- KUNIT_EXPECT_EQ(test, 200ul, regions[1].start);
- KUNIT_EXPECT_EQ(test, 220ul, regions[1].end);
- KUNIT_EXPECT_EQ(test, 300ul, regions[2].start);
- KUNIT_EXPECT_EQ(test, 330ul, regions[2].end);
-}
-
-static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
-{
- struct damon_region *r;
- unsigned int i = 0;
-
- damon_for_each_region(r, t) {
- if (i++ == idx)
- return r;
- }
-
- return NULL;
-}
-
-/*
- * Test 'damon_set_regions()'
- *
- * test kunit object
- * regions an array containing start/end addresses of current
- * monitoring target regions
- * nr_regions the number of the addresses in 'regions'
- * three_regions The three regions that need to be applied now
- * expected start/end addresses of monitoring target regions that
- * 'three_regions' are applied
- * nr_expected the number of addresses in 'expected'
- *
- * The memory mapping of the target processes changes dynamically. To follow
- * the change, DAMON periodically reads the mappings, simplifies it to the
- * three regions, and updates the monitoring target regions to fit in the three
- * regions. The update of current target regions is the role of
- * 'damon_set_regions()'.
- *
- * This test passes the given target regions and the new three regions that
- * need to be applied to the function and check whether it updates the regions
- * as expected.
- */
-static void damon_do_test_apply_three_regions(struct kunit *test,
- unsigned long *regions, int nr_regions,
- struct damon_addr_range *three_regions,
- unsigned long *expected, int nr_expected)
-{
- struct damon_target *t;
- struct damon_region *r;
- int i;
-
- t = damon_new_target();
- for (i = 0; i < nr_regions / 2; i++) {
- r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
- damon_add_region(r, t);
- }
-
- damon_set_regions(t, three_regions, 3);
-
- for (i = 0; i < nr_expected / 2; i++) {
- r = __nth_region_of(t, i);
- KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
- KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
- }
-
- damon_destroy_target(t);
-}
-
-/*
- * This function test most common case where the three big regions are only
- * slightly changed. Target regions should adjust their boundary (10-20-30,
- * 50-55, 70-80, 90-100) to fit with the new big regions or remove target
- * regions (57-79) that now out of the three regions.
- */
-static void damon_test_apply_three_regions1(struct kunit *test)
-{
- /* 10-20-30, 50-55-57-59, 70-80-90-100 */
- unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
- 70, 80, 80, 90, 90, 100};
- /* 5-27, 45-55, 73-104 */
- struct damon_addr_range new_three_regions[3] = {
- (struct damon_addr_range){.start = 5, .end = 27},
- (struct damon_addr_range){.start = 45, .end = 55},
- (struct damon_addr_range){.start = 73, .end = 104} };
- /* 5-20-27, 45-55, 73-80-90-104 */
- unsigned long expected[] = {5, 20, 20, 27, 45, 55,
- 73, 80, 80, 90, 90, 104};
-
- damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
- new_three_regions, expected, ARRAY_SIZE(expected));
-}
-
-/*
- * Test slightly bigger change. Similar to above, but the second big region
- * now require two target regions (50-55, 57-59) to be removed.
- */
-static void damon_test_apply_three_regions2(struct kunit *test)
-{
- /* 10-20-30, 50-55-57-59, 70-80-90-100 */
- unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
- 70, 80, 80, 90, 90, 100};
- /* 5-27, 56-57, 65-104 */
- struct damon_addr_range new_three_regions[3] = {
- (struct damon_addr_range){.start = 5, .end = 27},
- (struct damon_addr_range){.start = 56, .end = 57},
- (struct damon_addr_range){.start = 65, .end = 104} };
- /* 5-20-27, 56-57, 65-80-90-104 */
- unsigned long expected[] = {5, 20, 20, 27, 56, 57,
- 65, 80, 80, 90, 90, 104};
-
- damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
- new_three_regions, expected, ARRAY_SIZE(expected));
-}
-
-/*
- * Test a big change. The second big region has totally freed and mapped to
- * different area (50-59 -> 61-63). The target regions which were in the old
- * second big region (50-55-57-59) should be removed and new target region
- * covering the second big region (61-63) should be created.
- */
-static void damon_test_apply_three_regions3(struct kunit *test)
-{
- /* 10-20-30, 50-55-57-59, 70-80-90-100 */
- unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
- 70, 80, 80, 90, 90, 100};
- /* 5-27, 61-63, 65-104 */
- struct damon_addr_range new_three_regions[3] = {
- (struct damon_addr_range){.start = 5, .end = 27},
- (struct damon_addr_range){.start = 61, .end = 63},
- (struct damon_addr_range){.start = 65, .end = 104} };
- /* 5-20-27, 61-63, 65-80-90-104 */
- unsigned long expected[] = {5, 20, 20, 27, 61, 63,
- 65, 80, 80, 90, 90, 104};
-
- damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
- new_three_regions, expected, ARRAY_SIZE(expected));
-}
-
-/*
- * Test another big change. Both of the second and third big regions (50-59
- * and 70-100) has totally freed and mapped to different area (30-32 and
- * 65-68). The target regions which were in the old second and third big
- * regions should now be removed and new target regions covering the new second
- * and third big regions should be created.
- */
-static void damon_test_apply_three_regions4(struct kunit *test)
-{
- /* 10-20-30, 50-55-57-59, 70-80-90-100 */
- unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59,
- 70, 80, 80, 90, 90, 100};
- /* 5-7, 30-32, 65-68 */
- struct damon_addr_range new_three_regions[3] = {
- (struct damon_addr_range){.start = 5, .end = 7},
- (struct damon_addr_range){.start = 30, .end = 32},
- (struct damon_addr_range){.start = 65, .end = 68} };
- /* expect 5-7, 30-32, 65-68 */
- unsigned long expected[] = {5, 7, 30, 32, 65, 68};
-
- damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),
- new_three_regions, expected, ARRAY_SIZE(expected));
-}
-
-static void damon_test_split_evenly_fail(struct kunit *test,
- unsigned long start, unsigned long end, unsigned int nr_pieces)
-{
- struct damon_target *t = damon_new_target();
- struct damon_region *r = damon_new_region(start, end);
-
- damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test,
- damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
-
- damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, r->ar.start, start);
- KUNIT_EXPECT_EQ(test, r->ar.end, end);
- }
-
- damon_free_target(t);
-}
-
-static void damon_test_split_evenly_succ(struct kunit *test,
- unsigned long start, unsigned long end, unsigned int nr_pieces)
-{
- struct damon_target *t = damon_new_target();
- struct damon_region *r = damon_new_region(start, end);
- unsigned long expected_width = (end - start) / nr_pieces;
- unsigned long i = 0;
-
- damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test,
- damon_va_evenly_split_region(t, r, nr_pieces), 0);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
-
- damon_for_each_region(r, t) {
- if (i == nr_pieces - 1) {
- KUNIT_EXPECT_EQ(test,
- r->ar.start, start + i * expected_width);
- KUNIT_EXPECT_EQ(test, r->ar.end, end);
- break;
- }
- KUNIT_EXPECT_EQ(test,
- r->ar.start, start + i++ * expected_width);
- KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
- }
- damon_free_target(t);
-}
-
-static void damon_test_split_evenly(struct kunit *test)
-{
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
- -EINVAL);
-
- damon_test_split_evenly_fail(test, 0, 100, 0);
- damon_test_split_evenly_succ(test, 0, 100, 10);
- damon_test_split_evenly_succ(test, 5, 59, 5);
- damon_test_split_evenly_fail(test, 5, 6, 2);
-}
-
-static struct kunit_case damon_test_cases[] = {
- KUNIT_CASE(damon_test_three_regions_in_vmas),
- KUNIT_CASE(damon_test_apply_three_regions1),
- KUNIT_CASE(damon_test_apply_three_regions2),
- KUNIT_CASE(damon_test_apply_three_regions3),
- KUNIT_CASE(damon_test_apply_three_regions4),
- KUNIT_CASE(damon_test_split_evenly),
- {},
-};
-
-static struct kunit_suite damon_test_suite = {
- .name = "damon-operations",
- .test_cases = damon_test_cases,
-};
-kunit_test_suite(damon_test_suite);
-
-#endif /* _DAMON_VADDR_TEST_H */
-
-#endif /* CONFIG_DAMON_VADDR_KUNIT_TEST */
subsys_initcall(damon_va_initcall);
-#include "vaddr-test.h"
+#include "tests/vaddr-kunit.h"