// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */
#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "kvm_util.h"
#include "perf_test_util.h"
#include "processor.h"
#include "test_util.h"
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		2UL
static u64 dirty_log_manual_caps;
static bool host_quit;
static uint64_t iteration;
static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
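
/*
 * Iteration protocol: the main thread publishes the current pass number in
 * "iteration"; each vCPU worker runs the guest once per pass, records the
 * pass it completed in vcpu_last_completed_iteration[], and then spins
 * until "iteration" advances or host_quit is set.
 */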
static void *vcpu_worker(void *data)
{
	int ret;
	struct kvm_vm *vm = perf_test_args.vm;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
	int vcpu_id = vcpu_args->vcpu_id;

	vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
	run = vcpu_state(vm, vcpu_id);
	while (!READ_ONCE(host_quit)) {
		uint64_t current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vm, vcpu_id);
		ts_diff = timespec_diff_now(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_id);
		vcpu_last_completed_iteration[vcpu_id] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %lu\n",
			 vcpu_id, vcpu_last_completed_iteration[vcpu_id]);
		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %lu dirty memory time: %ld.%.9lds\n",
				 vcpu_id, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %lu populate memory time: %ld.%.9lds\n",
				 vcpu_id, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}
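
		/*
		 * Wait for the main thread to start the next iteration or to
		 * ask the workers to quit. Spinning, rather than sleeping,
		 * avoids adding wakeup latency to the next timed vcpu_run().
		 */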
		while (current_iteration == READ_ONCE(iteration) &&
		       !READ_ONCE(host_quit)) {}
	}
	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %lu iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	return NULL;
}
static void run_test(enum vm_guest_mode mode, unsigned long iterations,
		     uint64_t phys_offset, int wr_fract)
{
	pthread_t *vcpu_threads;
	struct kvm_vm *vm;
	unsigned long *bmap;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	int vcpu_id;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec avg;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct kvm_enable_cap cap = {};
	struct timespec clear_dirty_log_total = (struct timespec){0};
	vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);

	perf_test_args.wr_fract = wr_fract;

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	bmap = bitmap_alloc(host_num_pages);
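
	/*
	 * If KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is available, enable it so
	 * that KVM_GET_DIRTY_LOG only retrieves the dirty bitmap and pages
	 * are re-protected separately via KVM_CLEAR_DIRTY_LOG, which this
	 * test times on its own below.
	 */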
	if (dirty_log_manual_caps) {
		cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
		cap.args[0] = dirty_log_manual_caps;
		vm_enable_cap(vm, &cap);
	}
	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
	TEST_ASSERT(vcpu_threads, "Memory allocation failed");

	add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);

	sync_global_to_guest(vm, perf_test_args);
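
	/* The test parameters are now in guest memory, before any vCPU runs. */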
	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
			       &perf_test_args.vcpu_args[vcpu_id]);
	}
	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %lu - Populating\n", iteration);
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
		       iteration)
			pr_debug("Waiting for vcpu_last_completed_iteration == %lu\n",
				 iteration);
	}

	ts_diff = timespec_diff_now(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
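
	/*
	 * Iteration 0 only populates memory, and vcpu_worker only folds
	 * iterations > 0 into its totals, so fault-in cost does not skew
	 * the dirty memory measurements below.
	 */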
	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
				KVM_MEM_LOG_DIRTY_PAGES);
	ts_diff = timespec_diff_now(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
	while (iteration < iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %lu\n", iteration);
		for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
			while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
			       != iteration)
				pr_debug("Waiting for vCPU %d vcpu_last_completed_iteration == %lu\n",
					 vcpu_id, iteration);
		}

		ts_diff = timespec_diff_now(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %lu dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		clock_gettime(CLOCK_MONOTONIC, &start);
		kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);

		ts_diff = timespec_diff_now(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %lu get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
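
		/*
		 * Without manual protect, KVM_GET_DIRTY_LOG re-protects the
		 * dirty pages itself, so there is no separate clear step to
		 * time.
		 */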
		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
					       host_num_pages);

			ts_diff = timespec_diff_now(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %lu clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}
	/* Tell the vcpu threads to quit */
	host_quit = true;
	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
		pthread_join(vcpu_threads[vcpu_id], NULL);
	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
	ts_diff = timespec_diff_now(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
	avg = timespec_div(get_dirty_log_total, iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free(bmap);
	free(vcpu_threads);
	ucall_uninit(vm);
	kvm_vm_free(vm);
}
struct guest_mode {
	bool supported;
	bool enabled;
};
static struct guest_mode guest_modes[NUM_VM_MODES];

#define guest_mode_init(mode, supported, enabled) ({ \
	guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
})
static void help(char *name)
{
	int i;

	printf("usage: %s [-h] [-i iterations] [-p offset] "
	       "[-m mode] [-b vcpu bytes] [-f wr_fract] [-v vcpus]\n", name);
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	printf(" -m: specify the guest mode ID to test "
	       "(default: test all supported modes)\n"
	       "     This option may be used multiple times.\n"
	       "     Guest mode IDs:\n");
	for (i = 0; i < NUM_VM_MODES; ++i) {
		printf("         %d: %s%s\n", i, vm_guest_mode_string(i),
		       guest_modes[i].supported ? " (supported)" : "");
	}
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n");
	printf(" -f: specify the fraction of pages which should be written to\n"
	       "     as opposed to simply read, in the form\n"
	       "     1/<fraction of pages to write>.\n"
	       "     (default: 1 i.e. all pages are written to.)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	exit(0);
}
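
/*
 * Example invocation (the binary name produced by the selftests build is
 * assumed here):
 *
 *   ./dirty_log_perf_test -i 10 -v 4 -b 1G -f 2
 *
 * runs 10 dirty-logging passes with 4 vCPUs, each backed by a 1G region of
 * which 1/2 of the pages are written per pass (the rest are read).
 */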
int main(int argc, char *argv[])
{
	unsigned long iterations = TEST_HOST_LOOP_N;
	bool mode_selected = false;
	uint64_t phys_offset = 0;
	unsigned int mode;
	int opt, i;
	int wr_fract = 1;
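
	/*
	 * Query the manual dirty log protect capability up front and keep
	 * only the flag bits this test knows how to drive; a result of zero
	 * means KVM_CLEAR_DIRTY_LOG will not be exercised.
	 */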
	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);
#ifdef __x86_64__
	guest_mode_init(VM_MODE_PXXV48_4K, true, true);
#endif
#ifdef __aarch64__
	guest_mode_init(VM_MODE_P40V48_4K, true, true);
	guest_mode_init(VM_MODE_P40V48_64K, true, true);
	{
		unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);

		if (limit >= 52)
			guest_mode_init(VM_MODE_P52V48_64K, true, true);
		if (limit >= 48) {
			guest_mode_init(VM_MODE_P48V48_4K, true, true);
			guest_mode_init(VM_MODE_P48V48_64K, true, true);
		}
	}
#endif
#ifdef __s390x__
	guest_mode_init(VM_MODE_P40V48_4K, true, true);
#endif
	while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
		switch (opt) {
		case 'i':
			iterations = strtol(optarg, NULL, 10);
			break;
		case 'p':
			phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			if (!mode_selected) {
				for (i = 0; i < NUM_VM_MODES; ++i)
					guest_modes[i].enabled = false;
				mode_selected = true;
			}
			mode = strtoul(optarg, NULL, 10);
			TEST_ASSERT(mode < NUM_VM_MODES,
				    "Guest mode ID %d too big", mode);
			guest_modes[mode].enabled = true;
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'f':
			wr_fract = atoi(optarg);
			TEST_ASSERT(wr_fract >= 1,
				    "Write fraction cannot be less than one");
			break;
		case 'v':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0,
				    "Must have a positive number of vCPUs");
			TEST_ASSERT(nr_vcpus <= MAX_VCPUS,
				    "This test does not currently support\n"
				    "more than %d vCPUs.", MAX_VCPUS);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}
	TEST_ASSERT(iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", iterations);

	for (i = 0; i < NUM_VM_MODES; ++i) {
		if (!guest_modes[i].enabled)
			continue;
		TEST_ASSERT(guest_modes[i].supported,
			    "Guest mode ID %d (%s) not supported.",
			    i, vm_guest_mode_string(i));
		run_test(i, iterations, phys_offset, wr_fract);
	}

	return 0;
}