// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA			0x8000000ULL
#define GICR_BASE_GPA			0x80A0000ULL

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as it
	 * is only an optimization to reduce guest exits.
	 */
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
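
/*
 * The main thread drives the test by bumping "iteration"; each vCPU worker
 * records the last pass it finished in vcpu_last_completed_iteration[] and
 * spins until "iteration" moves on. Setting "host_quit" tells the workers
 * to exit.
 */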

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	int ret;
	struct kvm_vm *vm = perf_test_args.vm;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int vcpu_id = vcpu_args->vcpu_id;

	run = vcpu_state(vm, vcpu_id);

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vm, vcpu_id);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_id);
		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_id, vcpu_last_completed_iteration[vcpu_id]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_id, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_id, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}
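
		/* Wait for the host to kick off the next iteration. */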
		while (current_iteration == READ_ONCE(iteration) &&
		       !READ_ONCE(host_quit)) {}
	}

	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
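
/* Per-run parameters, filled in from the command line in main(). */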
struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	int wr_fract;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
};

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}
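
/*
 * With KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE, KVM_GET_DIRTY_LOG no longer
 * clears the log; userspace must call KVM_CLEAR_DIRTY_LOG to re-arm dirty
 * tracking for the pages it has harvested.
 */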
static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			    int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	int vcpu_id;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct kvm_enable_cap cap = {};
	struct timespec clear_dirty_log_total = (struct timespec){0};

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	perf_test_set_wr_fract(vm, p->wr_fract);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
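
	/* Opt the VM in to manual dirty-log protection when it is available. */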
	if (dirty_log_manual_caps) {
		cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
		cap.args[0] = dirty_log_manual_caps;
		vm_enable_cap(vm, &cap);
	}

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* -1 marks a vCPU that has not yet completed the population pass. */
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
		vcpu_last_completed_iteration[vcpu_id] = -1;

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
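
	/*
	 * Each pass below times three phases: the vCPUs dirtying memory,
	 * harvesting the dirty bitmaps with KVM_GET_DIRTY_LOG and, when
	 * manual protect is in use, re-arming them with KVM_CLEAR_DIRTY_LOG.
	 */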
	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
			while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Tell the vCPU threads to quit */
	host_quit = true;
	perf_test_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type] "
	       "[-x memslots]\n", name);
	puts("");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -f: specify the fraction of pages which should be written to\n"
	       "     as opposed to simply read, in the form\n"
	       "     1/<fraction of pages to write>.\n"
	       "     (default: 1 i.e. all pages are written to.)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	puts("");
	exit(0);
}

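/*
 * Illustrative invocation: run 10 iterations with 4 vCPUs, each dirtying
 * 512M of memory split across 4 memslots:
 *
 *	./dirty_log_perf_test -i 10 -v 4 -b 512M -x 4
 */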
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.wr_fract = 1,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
	};
	int opt;

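	/*
	 * Use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 by default when the host
	 * supports it; -g clears this and tests the legacy behavior where
	 * KVM_GET_DIRTY_LOG itself clears the dirty log.
	 */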
	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) {
		switch (opt) {
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'i':
			p.iterations = atoi(optarg);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'f':
			p.wr_fract = atoi(optarg);
			TEST_ASSERT(p.wr_fract >= 1,
				    "Write fraction cannot be less than one");
			break;
		case 'v':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'x':
			p.slots = atoi(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}