KVM: selftests: Add "-c" parameter to dirty log test
tools/testing/selftests/kvm/dirty_log_test.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KVM dirty page logging test
4  *
5  * Copyright (C) 2018, Red Hat, Inc.
6  */
7
8 #define _GNU_SOURCE /* for program_invocation_name */
9
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <unistd.h>
13 #include <time.h>
14 #include <pthread.h>
15 #include <semaphore.h>
16 #include <sys/types.h>
17 #include <signal.h>
18 #include <errno.h>
19 #include <linux/bitmap.h>
20 #include <linux/bitops.h>
21 #include <asm/barrier.h>
22
23 #include "test_util.h"
24 #include "kvm_util.h"
25 #include "processor.h"
26
27 #define VCPU_ID                         1
28
29 /* The memory slot index to track dirty pages */
30 #define TEST_MEM_SLOT_INDEX             1
31
32 /* Default guest test virtual memory offset */
33 #define DEFAULT_GUEST_TEST_MEM          0xc0000000
34
35 /* How many pages to dirty for each guest loop */
36 #define TEST_PAGES_PER_LOOP             1024
37
38 /* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
39 #define TEST_HOST_LOOP_N                32UL
40
41 /* Interval for each host loop (ms) */
42 #define TEST_HOST_LOOP_INTERVAL         10UL
43
44 /* Dirty bitmaps are always little endian, so we need to swap on big endian */
45 #if defined(__s390x__)
46 # define BITOP_LE_SWIZZLE       ((BITS_PER_LONG-1) & ~0x7)
47 # define test_bit_le(nr, addr) \
48         test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
49 # define set_bit_le(nr, addr) \
50         set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
51 # define clear_bit_le(nr, addr) \
52         clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
53 # define test_and_set_bit_le(nr, addr) \
54         test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
55 # define test_and_clear_bit_le(nr, addr) \
56         test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
57 #else
58 # define test_bit_le            test_bit
59 # define set_bit_le             set_bit
60 # define clear_bit_le           clear_bit
61 # define test_and_set_bit_le    test_and_set_bit
62 # define test_and_clear_bit_le  test_and_clear_bit
63 #endif
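/*
 * For illustration, assuming BITS_PER_LONG == 64: BITOP_LE_SWIZZLE is
 * 63 & ~0x7 = 56, so set_bit_le(0, ...) operates on bit 0 ^ 56 = 56,
 * set_bit_le(7, ...) on bit 63, and set_bit_le(8, ...) on bit 48.  The
 * bit order within each byte is preserved while the byte order within
 * each long is reversed, which is exactly how a little-endian bitmap
 * appears to a big-endian host.
 */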
64
65 #define TEST_DIRTY_RING_COUNT           65536
66
67 #define SIG_IPI SIGUSR1
68
69 /*
70  * Guest/Host shared variables. Ensure addr_gva2hva() and/or
71  * sync_global_to/from_guest() are used when accessing from
72  * the host. READ/WRITE_ONCE() should also be used with anything
73  * that may change.
74  */
75 static uint64_t host_page_size;
76 static uint64_t guest_page_size;
77 static uint64_t guest_num_pages;
78 static uint64_t random_array[TEST_PAGES_PER_LOOP];
79 static uint64_t iteration;
80
81 /*
82  * Guest physical memory offset of the testing memory slot.
83  * This will be set to the topmost valid physical address minus
84  * the test memory size.
85  */
86 static uint64_t guest_test_phys_mem;
87
88 /*
89  * Guest virtual memory offset of the testing memory slot.
90  * Must not conflict with identity mapped test code.
91  */
92 static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
93
94 /*
95  * Continuously write to the first 8 bytes of random pages within
96  * the testing memory region.
97  */
98 static void guest_code(void)
99 {
100         uint64_t addr;
101         int i;
102
103         /*
104          * On s390x, all pages of a 1M segment are initially marked as dirty
105          * when a page of the segment is written to for the very first time.
106  * To compensate for this peculiarity in this test, we need to touch all
107          * pages during the first iteration.
108          */
109         for (i = 0; i < guest_num_pages; i++) {
110                 addr = guest_test_virt_mem + i * guest_page_size;
111                 *(uint64_t *)addr = READ_ONCE(iteration);
112         }
113
114         while (true) {
115                 for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
116                         addr = guest_test_virt_mem;
117                         addr += (READ_ONCE(random_array[i]) % guest_num_pages)
118                                 * guest_page_size;
119                         addr &= ~(host_page_size - 1);
120                         *(uint64_t *)addr = READ_ONCE(iteration);
121                 }
122
123                 /* Tell the host that we need more random numbers */
124                 GUEST_SYNC(1);
125         }
126 }
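/*
 * Each GUEST_SYNC(1) above returns control to the vcpu_worker() thread on
 * the host, which refills random_array with fresh indices and re-enters
 * KVM_RUN, so every pass of the loop dirties a new pseudo-random set of
 * pages.
 */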
127
128 /* Host variables */
129 static bool host_quit;
130
131 /* Points to the test VM memory region on which we track dirty logs */
132 static void *host_test_mem;
133 static uint64_t host_num_pages;
134
135 /* For statistics only */
136 static uint64_t host_dirty_count;
137 static uint64_t host_clear_count;
138 static uint64_t host_track_next_count;
139
140 /* Semaphores to stop/continue the vcpu thread around a dirty ring reset */
141 static sem_t dirty_ring_vcpu_stop;
142 static sem_t dirty_ring_vcpu_cont;
143 /*
144  * This is updated by the vcpu thread to tell the host whether it's a
145  * ring-full event.  It should only be read after a sem_wait() of
146  * dirty_ring_vcpu_stop and before the vcpu continues to run.
147  */
148 static bool dirty_ring_vcpu_ring_full;
149 /*
150  * This is only used for verifying the dirty pages.  Dirty ring has a very
151  * tricky case when the ring just got full, kvm will do userspace exit due to
152  * ring full.  When that happens, the very last PFN is set but actually the
153  * data is not changed (the guest WRITE is not really applied yet), because
154  * we found that the dirty ring is full, refused to continue the vcpu, and
155  * recorded the dirty gfn with the old contents.
156  *
157  * For this specific case, it's safe to skip checking this pfn for this
158  * bit, because it's a redundant bit, and when the write happens later the bit
159  * will be set again.  We use this variable to always keep track of the latest
160  * dirty gfn we've collected, so that if a mismatch of data is found later in the
161  * verifying process, we let it pass.
162  */
163 static uint64_t dirty_ring_last_page;
164
165 enum log_mode_t {
166         /* Only use KVM_GET_DIRTY_LOG for logging */
167         LOG_MODE_DIRTY_LOG = 0,
168
169         /* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
170         LOG_MODE_CLEAR_LOG = 1,
171
172         /* Use dirty ring for logging */
173         LOG_MODE_DIRTY_RING = 2,
174
175         LOG_MODE_NUM,
176
177         /* Run all supported modes */
178         LOG_MODE_ALL = LOG_MODE_NUM,
179 };
180
181 /* Mode of logging to test.  Default is to run all supported modes */
182 static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
183 /* Logging mode for current run */
184 static enum log_mode_t host_log_mode;
185 static pthread_t vcpu_thread;
186 static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
187
188 static void vcpu_kick(void)
189 {
190         pthread_kill(vcpu_thread, SIG_IPI);
191 }
192
193 /*
194  * Our test does signal tricks, so use a version of sem_wait() that
195  * retries when interrupted by a signal
196  */
197 static void sem_wait_until(sem_t *sem)
198 {
199         int ret;
200
201         do
202                 ret = sem_wait(sem);
203         while (ret == -1 && errno == EINTR);
204 }
205
206 static bool clear_log_supported(void)
207 {
208         return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
209 }
210
211 static void clear_log_create_vm_done(struct kvm_vm *vm)
212 {
213         struct kvm_enable_cap cap = {};
214         u64 manual_caps;
215
216         manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
217         TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
218         manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
219                         KVM_DIRTY_LOG_INITIALLY_SET);
220         cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
221         cap.args[0] = manual_caps;
222         vm_enable_cap(vm, &cap);
223 }
224
225 static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
226                                           void *bitmap, uint32_t num_pages)
227 {
228         kvm_vm_get_dirty_log(vm, slot, bitmap);
229 }
230
231 static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
232                                           void *bitmap, uint32_t num_pages)
233 {
234         kvm_vm_get_dirty_log(vm, slot, bitmap);
235         kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
236 }
237
238 static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
239 {
240         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
241
242         TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
243                     "vcpu run failed: errno=%d", err);
244
245         TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
246                     "Invalid guest sync status: exit_reason=%s\n",
247                     exit_reason_str(run->exit_reason));
248 }
249
250 static bool dirty_ring_supported(void)
251 {
252         return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
253 }
254
255 static void dirty_ring_create_vm_done(struct kvm_vm *vm)
256 {
257         /*
258          * Switch to dirty ring mode after VM creation but before any
259          * vcpu is created.
260          */
261         vm_enable_dirty_ring(vm, test_dirty_ring_count *
262                              sizeof(struct kvm_dirty_gfn));
263 }
264
265 static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
266 {
267         return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
268 }
269
270 static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
271 {
272         gfn->flags = KVM_DIRTY_GFN_F_RESET;
273 }
274
275 static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
276                                        int slot, void *bitmap,
277                                        uint32_t num_pages, uint32_t *fetch_index)
278 {
279         struct kvm_dirty_gfn *cur;
280         uint32_t count = 0;
281
282         while (true) {
283                 cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
284                 if (!dirty_gfn_is_dirtied(cur))
285                         break;
286                 TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
287                             "%u != %u", cur->slot, slot);
288                 TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
289                             "0x%llx >= 0x%x", cur->offset, num_pages);
290                 //pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
291                 set_bit_le(cur->offset, bitmap);
292                 dirty_ring_last_page = cur->offset;
293                 dirty_gfn_set_collected(cur);
294                 (*fetch_index)++;
295                 count++;
296         }
297
298         return count;
299 }
300
301 static void dirty_ring_wait_vcpu(void)
302 {
303         /* This makes sure that the hardware PML cache is flushed */
304         vcpu_kick();
305         sem_wait_until(&dirty_ring_vcpu_stop);
306 }
307
308 static void dirty_ring_continue_vcpu(void)
309 {
310         pr_info("Notifying vcpu to continue\n");
311         sem_post(&dirty_ring_vcpu_cont);
312 }
313
314 static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
315                                            void *bitmap, uint32_t num_pages)
316 {
317         /* We only have one vcpu */
318         static uint32_t fetch_index = 0;
319         uint32_t count = 0, cleared;
320         bool continued_vcpu = false;
321
322         dirty_ring_wait_vcpu();
323
324         if (!dirty_ring_vcpu_ring_full) {
325                 /*
326                  * This is not a ring-full event, it's safe to allow
327                  * vcpu to continue
328                  */
329                 dirty_ring_continue_vcpu();
330                 continued_vcpu = true;
331         }
332
333         /* Only have one vcpu */
334         count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
335                                        slot, bitmap, num_pages, &fetch_index);
336
337         cleared = kvm_vm_reset_dirty_ring(vm);
338
339         /* Cleared pages should be the same as collected */
340         TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
341                     "with collected (%u)", cleared, count);
342
343         if (!continued_vcpu) {
344                 TEST_ASSERT(dirty_ring_vcpu_ring_full,
345                             "Didn't continue vcpu even without ring full");
346                 dirty_ring_continue_vcpu();
347         }
348
349         pr_info("Iteration %ld collected %u pages\n", iteration, count);
350 }
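/*
 * Note the ordering above: after a plain kick the vcpu is released before
 * harvesting (concurrent production into the ring is fine), but after a
 * ring-full exit the vcpu stays stopped until the ring has been harvested
 * and reset, since it cannot make progress until free entries are
 * available again.
 */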
351
352 static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
353 {
354         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
355
356         /* A ucall-sync or ring-full event is allowed */
357         if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
358                 /* We should allow this to continue */
359                 ;
360         } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
361                    (ret == -1 && err == EINTR)) {
362                 /* Update the flag first, before pausing */
363                 WRITE_ONCE(dirty_ring_vcpu_ring_full,
364                            run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
365                 sem_post(&dirty_ring_vcpu_stop);
366                 pr_info("vcpu stops because %s...\n",
367                         dirty_ring_vcpu_ring_full ?
368                         "dirty ring is full" : "vcpu is kicked out");
369                 sem_wait_until(&dirty_ring_vcpu_cont);
370                 pr_info("vcpu continues now.\n");
371         } else {
372                 TEST_ASSERT(false, "Invalid guest sync status: "
373                             "exit_reason=%s\n",
374                             exit_reason_str(run->exit_reason));
375         }
376 }
377
378 static void dirty_ring_before_vcpu_join(void)
379 {
380         /* Post the semaphore once more to make sure the vcpu thread will quit */
381         sem_post(&dirty_ring_vcpu_cont);
382 }
383
384 struct log_mode {
385         const char *name;
386         /* Return true if this mode is supported, otherwise false */
387         bool (*supported)(void);
388         /* Hook when the vm creation is done (before vcpu creation) */
389         void (*create_vm_done)(struct kvm_vm *vm);
390         /* Hook to collect the dirty pages into the bitmap provided */
391         void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
392                                      void *bitmap, uint32_t num_pages);
393         /* Hook to call after each vcpu run */
394         void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
395         void (*before_vcpu_join) (void);
396 } log_modes[LOG_MODE_NUM] = {
397         {
398                 .name = "dirty-log",
399                 .collect_dirty_pages = dirty_log_collect_dirty_pages,
400                 .after_vcpu_run = default_after_vcpu_run,
401         },
402         {
403                 .name = "clear-log",
404                 .supported = clear_log_supported,
405                 .create_vm_done = clear_log_create_vm_done,
406                 .collect_dirty_pages = clear_log_collect_dirty_pages,
407                 .after_vcpu_run = default_after_vcpu_run,
408         },
409         {
410                 .name = "dirty-ring",
411                 .supported = dirty_ring_supported,
412                 .create_vm_done = dirty_ring_create_vm_done,
413                 .collect_dirty_pages = dirty_ring_collect_dirty_pages,
414                 .before_vcpu_join = dirty_ring_before_vcpu_join,
415                 .after_vcpu_run = dirty_ring_after_vcpu_run,
416         },
417 };
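/*
 * The table above is the only place a logging mode gets wired up.  As a
 * sketch (hypothetical names, not part of the test), one more mode would
 * add its hooks plus an array entry such as:
 *
 *     static bool foo_log_supported(void)
 *     {
 *             return kvm_check_cap(KVM_CAP_FOO_LOG);
 *     }
 *
 *     {
 *             .name = "foo-log",
 *             .supported = foo_log_supported,
 *             .collect_dirty_pages = foo_log_collect_dirty_pages,
 *             .after_vcpu_run = default_after_vcpu_run,
 *     },
 *
 * together with a LOG_MODE_FOO_LOG value before LOG_MODE_NUM, so that
 * "-M all" and the help text pick it up automatically.
 */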
418
419 /*
420  * We use this bitmap to track the pages that should have their dirty
421  * bit set in the _next_ iteration.  For example, if we detect that a
422  * page's value changed to the current iteration but at the same time
423  * the page's bit is cleared in the latest bitmap, then the system must
424  * report that write in the next get-dirty-log call.
425  */
426 static unsigned long *host_bmap_track;
427
428 static void log_modes_dump(void)
429 {
430         int i;
431
432         printf("all");
433         for (i = 0; i < LOG_MODE_NUM; i++)
434                 printf(", %s", log_modes[i].name);
435         printf("\n");
436 }
437
438 static bool log_mode_supported(void)
439 {
440         struct log_mode *mode = &log_modes[host_log_mode];
441
442         if (mode->supported)
443                 return mode->supported();
444
445         return true;
446 }
447
448 static void log_mode_create_vm_done(struct kvm_vm *vm)
449 {
450         struct log_mode *mode = &log_modes[host_log_mode];
451
452         if (mode->create_vm_done)
453                 mode->create_vm_done(vm);
454 }
455
456 static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
457                                          void *bitmap, uint32_t num_pages)
458 {
459         struct log_mode *mode = &log_modes[host_log_mode];
460
461         TEST_ASSERT(mode->collect_dirty_pages != NULL,
462                     "collect_dirty_pages() is required for any log mode!");
463         mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
464 }
465
466 static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
467 {
468         struct log_mode *mode = &log_modes[host_log_mode];
469
470         if (mode->after_vcpu_run)
471                 mode->after_vcpu_run(vm, ret, err);
472 }
473
474 static void log_mode_before_vcpu_join(void)
475 {
476         struct log_mode *mode = &log_modes[host_log_mode];
477
478         if (mode->before_vcpu_join)
479                 mode->before_vcpu_join();
480 }
481
482 static void generate_random_array(uint64_t *guest_array, uint64_t size)
483 {
484         uint64_t i;
485
486         for (i = 0; i < size; i++)
487                 guest_array[i] = random();
488 }
489
490 static void *vcpu_worker(void *data)
491 {
492         int ret, vcpu_fd;
493         struct kvm_vm *vm = data;
494         uint64_t *guest_array;
495         uint64_t pages_count = 0;
496         struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
497                                                  + sizeof(sigset_t));
498         sigset_t *sigset = (sigset_t *) &sigmask->sigset;
499
500         vcpu_fd = vcpu_get_fd(vm, VCPU_ID);
501
502         /*
503          * SIG_IPI is unblocked atomically while in KVM_RUN.  It causes the
504          * ioctl to return with -EINTR, but it is still pending and we need
505          * to accept it with the sigwait.
506          */
507         sigmask->len = 8;
508         pthread_sigmask(0, NULL, sigset);
509         vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
510         sigaddset(sigset, SIG_IPI);
511         pthread_sigmask(SIG_BLOCK, sigset, NULL);
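        /*
         * At this point SIG_IPI is blocked for this thread, so a
         * vcpu_kick() issued while we are outside KVM_RUN stays pending.
         * KVM_RUN atomically switches to the mask installed via
         * KVM_SET_SIGNAL_MASK above (which does not block SIG_IPI), so a
         * kick makes the ioctl return -EINTR, and the still-pending
         * signal is then consumed by the sigwait() in the loop below.
         */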
512
513         sigemptyset(sigset);
514         sigaddset(sigset, SIG_IPI);
515
516         guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
517
518         while (!READ_ONCE(host_quit)) {
519                 /* Refill the random page indices for the guest to pick from */
520                 generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
521                 pages_count += TEST_PAGES_PER_LOOP;
522                 /* Let the guest dirty the random pages */
523                 ret = ioctl(vcpu_fd, KVM_RUN, NULL);
524                 if (ret == -1 && errno == EINTR) {
525                         int sig = -1;
526                         sigwait(sigset, &sig);
527                         assert(sig == SIG_IPI);
528                 }
529                 log_mode_after_vcpu_run(vm, ret, errno);
530         }
531
532         pr_info("Dirtied %"PRIu64" pages\n", pages_count);
533
534         return NULL;
535 }
536
537 static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
538 {
539         uint64_t step = vm_num_host_pages(mode, 1);
540         uint64_t page;
541         uint64_t *value_ptr;
542         uint64_t min_iter = 0;
543
544         for (page = 0; page < host_num_pages; page += step) {
545                 value_ptr = host_test_mem + page * host_page_size;
546
547                 /* If this is a special page that we were tracking... */
548                 if (test_and_clear_bit_le(page, host_bmap_track)) {
549                         host_track_next_count++;
550                         TEST_ASSERT(test_bit_le(page, bmap),
551                                     "Page %"PRIu64" should have its dirty bit "
552                                     "set in this iteration but it is missing",
553                                     page);
554                 }
555
556                 if (test_and_clear_bit_le(page, bmap)) {
557                         bool matched;
558
559                         host_dirty_count++;
560
561                         /*
562                          * If the bit is set, the value written onto
563                          * the corresponding page should be either the
564                          * previous iteration number or the current one.
565                          */
566                         matched = (*value_ptr == iteration ||
567                                    *value_ptr == iteration - 1);
568
569                         if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
570                                 if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
571                                         /*
572                                          * Short answer: this case is special
573                                          * only for dirty ring test where the
574                                          * page is the last page before a kvm
575                                          * dirty ring full in iteration N-2.
576                                          *
577                                          * Long answer: Assuming ring size R,
578                                          * one possible condition is:
579                                          *
580                                          *      main thr       vcpu thr
581                                          *      --------       --------
582                                          *    iter=1
583                                          *                   write 1 to page 0~(R-1)
584                                          *                   full, vmexit
585                                          *    collect 0~(R-1)
586                                          *    kick vcpu
587                                          *                   write 1 to (R-1)~(2R-2)
588                                          *                   full, vmexit
589                                          *    iter=2
590                                          *    collect (R-1)~(2R-2)
591                                          *    kick vcpu
592                                          *                   write 1 to (2R-2)
593                                          *                   (NOTE!!! "1" cached in cpu reg)
594                                          *                   write 2 to (2R-1)~(3R-3)
595                                          *                   full, vmexit
596                                          *    iter=3
597                                          *    collect (2R-2)~(3R-3)
598                                          *    (here if we read value on page
599                                          *     "2R-2" is 1, while iter=3!!!)
600                                          *
601                                          * This however can only happen once per iteration.
602                                          */
603                                         min_iter = iteration - 1;
604                                         continue;
605                                 } else if (page == dirty_ring_last_page) {
606                                         /*
607                                          * Please refer to comments in
608                                          * dirty_ring_last_page.
609                                          */
610                                         continue;
611                                 }
612                         }
613
614                         TEST_ASSERT(matched,
615                                     "Set page %"PRIu64" value %"PRIu64
616                                     " incorrect (iteration=%"PRIu64")",
617                                     page, *value_ptr, iteration);
618                 } else {
619                         host_clear_count++;
620                         /*
621                          * If cleared, the value written can be any
622                          * value smaller than or equal to the iteration
623                          * number.  Note that the value can be exactly
624                          * (iteration-1) if that write can happen
625                          * like this:
626                          *
627                          * (1) increase loop count to "iteration-1"
628                          * (2) write to page P happens (with value
629                          *     "iteration-1")
630                          * (3) get dirty log for "iteration-1"; we'll
631                          *     see that page P bit is set (dirtied),
632                          *     and not set the bit in host_bmap_track
633                          * (4) increase loop count to "iteration"
634                          *     (which is current iteration)
635                          * (5) get dirty log for current iteration,
636                          *     we'll see that page P is cleared, with
637                          *     value "iteration-1".
638                          */
639                         TEST_ASSERT(*value_ptr <= iteration,
640                                     "Clear page %"PRIu64" value %"PRIu64
641                                     " incorrect (iteration=%"PRIu64")",
642                                     page, *value_ptr, iteration);
643                         if (*value_ptr == iteration) {
644                                 /*
645                                  * This page was _just_ modified; it
646                                  * should report its dirtiness in the
647                                  * next run
648                                  */
649                                 set_bit_le(page, host_bmap_track);
650                         }
651                 }
652         }
653 }
654
655 static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
656                                 uint64_t extra_mem_pages, void *guest_code)
657 {
658         struct kvm_vm *vm;
659         uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
660
661         pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
662
663         vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
664         kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
665 #ifdef __x86_64__
666         vm_create_irqchip(vm);
667 #endif
668         log_mode_create_vm_done(vm);
669         vm_vcpu_add_default(vm, vcpuid, guest_code);
670         return vm;
671 }
672
673 #define DIRTY_MEM_BITS 30 /* 1G */
674 #define PAGE_SHIFT_4K  12
675
676 static void run_test(enum vm_guest_mode mode, unsigned long iterations,
677                      unsigned long interval, uint64_t phys_offset)
678 {
679         struct kvm_vm *vm;
680         unsigned long *bmap;
681
682         if (!log_mode_supported()) {
683                 print_skip("Log mode '%s' not supported",
684                            log_modes[host_log_mode].name);
685                 return;
686         }
687
688         /*
689          * We reserve page tables for twice the extra dirty memory,
690          * which will definitely cover the original (1G+) test range.
691          * The calculation is done with a 4K page size, the smallest,
692          * so the number of pages is enough for all archs (e.g., a
693          * guest using 64K pages will need even less memory for its
694          * page tables).
695          */
696         vm = create_vm(mode, VCPU_ID,
697                        2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
698                        guest_code);
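        /*
         * Illustrative numbers for the reservation above: 2ul << (30 - 12)
         * is 2^19 = 524288 extra 4K pages (~2G), so create_vm() sets aside
         * 524288 / 512 * 2 = 2048 pages (~8M) for page tables, comfortably
         * covering the ~1G test region and its mappings.
         */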
699
700         guest_page_size = vm_get_page_size(vm);
701         /*
702          * A little more than 1G of guest page sized pages.  Cover the
703          * case where the size is not aligned to 64 pages.
704          */
705         guest_num_pages = (1ul << (DIRTY_MEM_BITS -
706                                    vm_get_page_shift(vm))) + 3;
707         guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
708
709         host_page_size = getpagesize();
710         host_num_pages = vm_num_host_pages(mode, guest_num_pages);
711
712         if (!phys_offset) {
713                 guest_test_phys_mem = (vm_get_max_gfn(vm) -
714                                        guest_num_pages) * guest_page_size;
715                 guest_test_phys_mem &= ~(host_page_size - 1);
716         } else {
717                 guest_test_phys_mem = phys_offset;
718         }
719
720 #ifdef __s390x__
721         /* Align to 1M (segment size) */
722         guest_test_phys_mem &= ~((1 << 20) - 1);
723 #endif
724
725         pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
726
727         bmap = bitmap_alloc(host_num_pages);
728         host_bmap_track = bitmap_alloc(host_num_pages);
729
730         /* Add an extra memory slot for testing dirty logging */
731         vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
732                                     guest_test_phys_mem,
733                                     TEST_MEM_SLOT_INDEX,
734                                     guest_num_pages,
735                                     KVM_MEM_LOG_DIRTY_PAGES);
736
737         /* Set up the virtual mapping for the dirty-tracked memory slot */
738         virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
739
740         /* Cache the HVA pointer of the region */
741         host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
742
743 #ifdef __x86_64__
744         vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
745 #endif
746         ucall_init(vm, NULL);
747
748         /* Export the shared variables to the guest */
749         sync_global_to_guest(vm, host_page_size);
750         sync_global_to_guest(vm, guest_page_size);
751         sync_global_to_guest(vm, guest_test_virt_mem);
752         sync_global_to_guest(vm, guest_num_pages);
753
754         /* Start the iterations */
755         iteration = 1;
756         sync_global_to_guest(vm, iteration);
757         host_quit = false;
758         host_dirty_count = 0;
759         host_clear_count = 0;
760         host_track_next_count = 0;
761
762         pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
763
764         while (iteration < iterations) {
765                 /* Give the vcpu thread some time to dirty some pages */
766                 usleep(interval * 1000);
767                 log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
768                                              bmap, host_num_pages);
769                 vm_dirty_log_verify(mode, bmap);
770                 iteration++;
771                 sync_global_to_guest(vm, iteration);
772         }
773
774         /* Tell the vcpu thread to quit */
775         host_quit = true;
776         log_mode_before_vcpu_join();
777         pthread_join(vcpu_thread, NULL);
778
779         pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
780                 "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
781                 host_track_next_count);
782
783         free(bmap);
784         free(host_bmap_track);
785         ucall_uninit(vm);
786         kvm_vm_free(vm);
787 }
788
789 struct guest_mode {
790         bool supported;
791         bool enabled;
792 };
793 static struct guest_mode guest_modes[NUM_VM_MODES];
794
795 #define guest_mode_init(mode, supported, enabled) ({ \
796         guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
797 })
798
799 static void help(char *name)
800 {
801         int i;
802
803         puts("");
804         printf("usage: %s [-h] [-c count] [-i iterations] [-I interval] "
805                "[-p offset] [-m mode] [-M log-mode]\n", name);
806         puts("");
807         printf(" -c: specify dirty ring size, in number of entries\n");
808         printf("     (only useful for dirty-ring test; default: %"PRIu32")\n",
809                TEST_DIRTY_RING_COUNT);
810         printf(" -i: specify iteration counts (default: %"PRIu64")\n",
811                TEST_HOST_LOOP_N);
812         printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
813                TEST_HOST_LOOP_INTERVAL);
814         printf(" -p: specify guest physical test memory offset\n"
815                "     Warning: a low offset can conflict with the loaded test code.\n");
816         printf(" -M: specify the host logging mode "
817                "(default: run all log modes).  Supported modes: \n\t");
818         log_modes_dump();
819         printf(" -m: specify the guest mode ID to test "
820                "(default: test all supported modes)\n"
821                "     This option may be used multiple times.\n"
822                "     Guest mode IDs:\n");
823         for (i = 0; i < NUM_VM_MODES; ++i) {
824                 printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
825                        guest_modes[i].supported ? " (supported)" : "");
826         }
827         puts("");
828         exit(0);
829 }
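/*
 * Example invocations (illustrative; paths depend on how the selftests
 * were built):
 *
 *     ./dirty_log_test                          # all modes, defaults
 *     ./dirty_log_test -M dirty-ring -c 4096    # dirty ring, 4096 entries
 *     ./dirty_log_test -M clear-log -i 64 -I 5  # 64 iterations, 5 ms apart
 */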
830
831 int main(int argc, char *argv[])
832 {
833         unsigned long iterations = TEST_HOST_LOOP_N;
834         unsigned long interval = TEST_HOST_LOOP_INTERVAL;
835         bool mode_selected = false;
836         uint64_t phys_offset = 0;
837         unsigned int mode;
838         int opt, i, j;
839
840         sem_init(&dirty_ring_vcpu_stop, 0, 0);
841         sem_init(&dirty_ring_vcpu_cont, 0, 0);
842
843 #ifdef __x86_64__
844         guest_mode_init(VM_MODE_PXXV48_4K, true, true);
845 #endif
846 #ifdef __aarch64__
847         guest_mode_init(VM_MODE_P40V48_4K, true, true);
848         guest_mode_init(VM_MODE_P40V48_64K, true, true);
849
850         {
851                 unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
852
853                 if (limit >= 52)
854                         guest_mode_init(VM_MODE_P52V48_64K, true, true);
855                 if (limit >= 48) {
856                         guest_mode_init(VM_MODE_P48V48_4K, true, true);
857                         guest_mode_init(VM_MODE_P48V48_64K, true, true);
858                 }
859         }
860 #endif
861 #ifdef __s390x__
862         guest_mode_init(VM_MODE_P40V48_4K, true, true);
863 #endif
864
865         while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
866                 switch (opt) {
867                 case 'c':
868                         test_dirty_ring_count = strtol(optarg, NULL, 10);
869                         break;
870                 case 'i':
871                         iterations = strtol(optarg, NULL, 10);
872                         break;
873                 case 'I':
874                         interval = strtol(optarg, NULL, 10);
875                         break;
876                 case 'p':
877                         phys_offset = strtoull(optarg, NULL, 0);
878                         break;
879                 case 'm':
880                         if (!mode_selected) {
881                                 for (i = 0; i < NUM_VM_MODES; ++i)
882                                         guest_modes[i].enabled = false;
883                                 mode_selected = true;
884                         }
885                         mode = strtoul(optarg, NULL, 10);
886                         TEST_ASSERT(mode < NUM_VM_MODES,
887                                     "Guest mode ID %d too big", mode);
888                         guest_modes[mode].enabled = true;
889                         break;
890                 case 'M':
891                         if (!strcmp(optarg, "all")) {
892                                 host_log_mode_option = LOG_MODE_ALL;
893                                 break;
894                         }
895                         for (i = 0; i < LOG_MODE_NUM; i++) {
896                                 if (!strcmp(optarg, log_modes[i].name)) {
897                                         pr_info("Setting log mode to: '%s'\n",
898                                                 optarg);
899                                         host_log_mode_option = i;
900                                         break;
901                                 }
902                         }
903                         if (i == LOG_MODE_NUM) {
904                                 printf("Log mode '%s' invalid. Please choose "
905                                        "from: ", optarg);
906                                 log_modes_dump();
907                                 exit(1);
908                         }
909                         break;
910                 case 'h':
911                 default:
912                         help(argv[0]);
913                         break;
914                 }
915         }
916
917         TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
918         TEST_ASSERT(interval > 0, "Interval must be greater than zero");
919
920         pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
921                 iterations, interval);
922
923         srandom(time(0));
924
925         for (i = 0; i < NUM_VM_MODES; ++i) {
926                 if (!guest_modes[i].enabled)
927                         continue;
928                 TEST_ASSERT(guest_modes[i].supported,
929                             "Guest mode ID %d (%s) not supported.",
930                             i, vm_guest_mode_string(i));
931                 if (host_log_mode_option == LOG_MODE_ALL) {
932                         /* Run each log mode */
933                         for (j = 0; j < LOG_MODE_NUM; j++) {
934                                 pr_info("Testing Log Mode '%s'\n",
935                                         log_modes[j].name);
936                                 host_log_mode = j;
937                                 run_test(i, iterations, interval, phys_offset);
938                         }
939                 } else {
940                         host_log_mode = host_log_mode_option;
941                         run_test(i, iterations, interval, phys_offset);
942                 }
943         }
944
945         return 0;
946 }