// SPDX-License-Identifier: GPL-2.0-only
/*
 * vpmu_counter_access - Test vPMU event counter access
 *
 * Copyright (c) 2023 Google LLC.
 *
 * This test checks that the guest sees the same number of PMU event
 * counters (PMCR_EL0.N) that userspace sets, that the guest can access
 * those counters, and that the guest is prevented from accessing any
 * other counters.
 * It also checks that userspace accesses to the PMU registers honor the
 * PMCR.N value that's set for the guest.
 * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
 */
#include <kvm_util.h>
#include <processor.h>
#include <test_util.h>
#include <vgic.h>
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>

/* The max number of the PMU event counters (excluding the cycle counter) */
#define ARMV8_PMU_MAX_GENERAL_COUNTERS  (ARMV8_PMU_MAX_COUNTERS - 1)

/* The cycle counter bit position that's common among the PMU registers */
#define ARMV8_PMU_CYCLE_IDX             31

struct vpmu_vm {
        struct kvm_vm *vm;
        struct kvm_vcpu *vcpu;
        int gic_fd;
};

static struct vpmu_vm vpmu_vm;

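/*
 * A SET/CLR pair of PMU bitmap register IDs (e.g. PMCNTENSET_EL0 and
 * PMCNTENCLR_EL0), exercised by the userspace validity checks below.
 */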
struct pmreg_sets {
        uint64_t set_reg_id;
        uint64_t clr_reg_id;
};

#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}

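/*
 * Extract PMCR_EL0.N, the number of implemented event counters
 * (excluding the cycle counter).
 */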
static uint64_t get_pmcr_n(uint64_t pmcr)
{
        return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}

static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
{
        u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N);
}

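/*
 * Build a bitmap that covers the cycle counter plus the first @n
 * event counters.
 */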
static uint64_t get_counters_mask(uint64_t n)
{
        uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);

        if (n)
                mask |= GENMASK(n - 1, 0);
        return mask;
}

/* Read PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
static inline unsigned long read_sel_evcntr(int sel)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        return read_sysreg(pmxevcntr_el0);
}

/* Write PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
static inline void write_sel_evcntr(int sel, unsigned long val)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        write_sysreg(val, pmxevcntr_el0);
        isb();
}

/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
static inline unsigned long read_sel_evtyper(int sel)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        return read_sysreg(pmxevtyper_el0);
}

/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
static inline void write_sel_evtyper(int sel, unsigned long val)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        write_sysreg(val, pmxevtyper_el0);
        isb();
}

static inline void enable_counter(int idx)
{
        uint64_t v = read_sysreg(pmcntenset_el0);

        write_sysreg(BIT(idx) | v, pmcntenset_el0);
        isb();
}

static inline void disable_counter(int idx)
{
        /*
         * PMCNTENCLR_EL0 is write-1-to-clear; write only the bit for @idx
         * so that the other counters stay enabled.
         */
        write_sysreg(BIT(idx), pmcntenclr_el0);
        isb();
}

static void pmu_disable_reset(void)
{
        uint64_t pmcr = read_sysreg(pmcr_el0);

        /* Reset all counters, disabling them */
        pmcr &= ~ARMV8_PMU_PMCR_E;
        write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
        isb();
}

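/*
 * Direct accessors for PMEV{CNTR,TYPER}<n>_EL0. PMEVN_SWITCH() expands to
 * a switch statement that picks the numbered register matching @n.
 */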
#define RETURN_READ_PMEVCNTRN(n) \
        return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
        return 0;
}

#define WRITE_PMEVCNTRN(n) \
        write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
        isb();
}

#define READ_PMEVTYPERN(n) \
        return read_sysreg(pmevtyper##n##_el0)
static unsigned long read_pmevtypern(int n)
{
        PMEVN_SWITCH(n, READ_PMEVTYPERN);
        return 0;
}

#define WRITE_PMEVTYPERN(n) \
        write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
        isb();
}

/*
 * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
 * accessors that test cases will use. Each of the accessors will
 * either directly read/write PMEV{CNTR,TYPER}<n>_EL0
 * (i.e. {read,write}_pmev{cnt,type}rn()), or read/write them through
 * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
 *
 * This is used to test that combinations of those accessors provide
 * consistent behavior.
 */
struct pmc_accessor {
        /* A function to be used to read PMEVCNTR<n>_EL0 */
        unsigned long   (*read_cntr)(int idx);
        /* A function to be used to write PMEVCNTR<n>_EL0 */
        void            (*write_cntr)(int idx, unsigned long val);
        /* A function to be used to read PMEVTYPER<n>_EL0 */
        unsigned long   (*read_typer)(int idx);
        /* A function to be used to write PMEVTYPER<n>_EL0 */
        void            (*write_typer)(int idx, unsigned long val);
};

struct pmc_accessor pmc_accessors[] = {
        /* test with all direct accesses */
        { read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
        /* test with all indirect accesses */
        { read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
        /* read with direct accesses, and write with indirect accesses */
        { read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
        /* read with indirect accesses, and write with direct accesses */
        { read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
};

/*
 * Convert a pmc_accessor pointer to its index in pmc_accessors[],
 * assuming that the pointer is one of the entries in pmc_accessors[].
 */
#define PMC_ACC_TO_IDX(acc)     (acc - &pmc_accessors[0])

#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected)                     \
{                                                                                \
        uint64_t _tval = read_sysreg(regname);                                   \
                                                                                 \
        if (set_expected)                                                        \
                __GUEST_ASSERT((_tval & mask),                                   \
                                "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
                                _tval, mask, set_expected);                      \
        else                                                                     \
                __GUEST_ASSERT(!(_tval & mask),                                  \
                                "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
                                _tval, mask, set_expected);                      \
}

/*
 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
 * are set or cleared as specified in @set_expected.
 */
static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
{
        GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
}

/*
 * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
 * to the specified counter (@pmc_idx) can be read/written as expected.
 * When @set_op is true, it tries to set the bit for the counter in
 * those registers by writing the SET registers (the bit won't be set
 * if the counter is not implemented though).
 * Otherwise, it tries to clear the bits in the registers by writing
 * the CLR registers.
 * Then, it checks if the values indicated in the registers are as expected.
 */
static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
{
        uint64_t pmcr_n, test_bit = BIT(pmc_idx);
        bool set_expected = false;

        if (set_op) {
                write_sysreg(test_bit, pmcntenset_el0);
                write_sysreg(test_bit, pmintenset_el1);
                write_sysreg(test_bit, pmovsset_el0);

                /* The bit will be set only if the counter is implemented */
                pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
                set_expected = (pmc_idx < pmcr_n);
        } else {
                write_sysreg(test_bit, pmcntenclr_el0);
                write_sysreg(test_bit, pmintenclr_el1);
                write_sysreg(test_bit, pmovsclr_el0);
        }
        check_bitmap_pmu_regs(test_bit, set_expected);
}

/*
 * Tests for reading/writing registers for the (implemented) event counter
 * specified by @pmc_idx.
 */
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
        uint64_t write_data, read_data;

        /* Disable all PMCs and reset all PMCs to zero. */
        pmu_disable_reset();

        /*
         * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers.
         */

        /* Make sure that the bit in those registers is cleared */
        test_bitmap_pmu_regs(pmc_idx, false);
        /* Test if setting the bit in those registers works */
        test_bitmap_pmu_regs(pmc_idx, true);
        /* Test if clearing the bit in those registers works */
        test_bitmap_pmu_regs(pmc_idx, false);

        /*
         * Tests for reading/writing the event type register.
         */

        /*
         * Set the event type register to an arbitrary value just for testing
         * of reading/writing the register.
         * The Arm ARM says that for events 0x0000 to 0x003F, the value
         * indicated in the PMEVTYPER<n>_EL0.evtCount field is the value
         * written to the field even when the specified event is not
         * supported.
         */
        write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
        acc->write_typer(pmc_idx, write_data);
        read_data = acc->read_typer(pmc_idx);
        __GUEST_ASSERT(read_data == write_data,
                       "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
                       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);

        /*
         * Tests for reading/writing the event count register.
         */

        read_data = acc->read_cntr(pmc_idx);

        /* The count value must be 0, as the counter is disabled and reset */
        __GUEST_ASSERT(read_data == 0,
                       "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx",
                       pmc_idx, PMC_ACC_TO_IDX(acc), read_data);

        write_data = read_data + pmc_idx + 0x12345;
        acc->write_cntr(pmc_idx, write_data);
        read_data = acc->read_cntr(pmc_idx);
        __GUEST_ASSERT(read_data == write_data,
                       "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
                       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
}

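/*
 * The exception class (EC) that the guest expects the next trap to raise.
 * INVALID_EC means no exception is expected, or the expected one has
 * already been taken and handled.
 */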
#define INVALID_EC      (-1ul)
uint64_t expected_ec = INVALID_EC;

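/*
 * Guest synchronous exception handler: check that the EC of the exception
 * matches expected_ec, skip the trapping instruction, and reset expected_ec
 * to INVALID_EC to signal that the exception was taken.
 */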
static void guest_sync_handler(struct ex_regs *regs)
{
        uint64_t esr, ec;

        esr = read_sysreg(esr_el1);
        ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;

        __GUEST_ASSERT(expected_ec == ec,
                        "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
                        regs->pc, esr, ec, expected_ec);

        /* skip the trapping instruction */
        regs->pc += 4;

        /* Reset expected_ec to INVALID_EC to indicate the exception occurred */
        expected_ec = INVALID_EC;
}

/*
 * Run the given operation that should trigger an exception with the
 * given exception class. The exception handler (guest_sync_handler)
 * will reset expected_ec to INVALID_EC and skip the instruction
 * that trapped.
 */
#define TEST_EXCEPTION(ec, ops)                         \
({                                                      \
        GUEST_ASSERT(ec != INVALID_EC);                 \
        WRITE_ONCE(expected_ec, ec);                    \
        dsb(ish);                                       \
        ops;                                            \
        GUEST_ASSERT(expected_ec == INVALID_EC);        \
})

/*
 * Tests for reading/writing registers for the unimplemented event counter
 * specified by @pmc_idx (>= PMCR_EL0.N).
 */
static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
        /*
         * Reading/writing the event count/type registers should cause
         * an UNDEFINED exception.
         */
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx));
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx));
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
        /*
         * The bit corresponding to the (unimplemented) counter in
         * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
         */
        test_bitmap_pmu_regs(pmc_idx, true);
        test_bitmap_pmu_regs(pmc_idx, false);
}

/*
 * The guest is configured with PMUv3 with @expected_pmcr_n number of
 * event counters.
 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
 * if reading/writing PMU registers for implemented or unimplemented
 * counters works as expected.
 */
static void guest_code(uint64_t expected_pmcr_n)
{
        uint64_t pmcr, pmcr_n, unimp_mask;
        int i, pmc;

        __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
                        "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
                        expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);

        pmcr = read_sysreg(pmcr_el0);
        pmcr_n = get_pmcr_n(pmcr);

        /* Make sure that PMCR_EL0.N indicates the value userspace set */
        __GUEST_ASSERT(pmcr_n == expected_pmcr_n,
                        "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
                        expected_pmcr_n, pmcr_n);

        /*
         * Make sure that (RAZ) bits corresponding to unimplemented event
         * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
         * to zero.
         * (NOTE: bits for implemented event counters are reset to UNKNOWN)
         */
        unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
        check_bitmap_pmu_regs(unimp_mask, false);

        /*
         * Tests for reading/writing PMU registers for implemented counters.
         * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
         */
        for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
                for (pmc = 0; pmc < pmcr_n; pmc++)
                        test_access_pmc_regs(&pmc_accessors[i], pmc);
        }

        /*
         * Tests for reading/writing PMU registers for unimplemented counters.
         * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
         */
        for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
                for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
                        test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
        }

        GUEST_DONE();
}

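/* Guest physical addresses of the GIC distributor and redistributor */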
#define GICD_BASE_GPA   0x8000000ULL
#define GICR_BASE_GPA   0x80A0000ULL

/* Create a VM that has one vCPU with PMUv3 configured. */
static void create_vpmu_vm(void *guest_code)
{
        struct kvm_vcpu_init init;
        uint8_t pmuver, ec;
        uint64_t dfr0, irq = 23;
        struct kvm_device_attr irq_attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
                .addr = (uint64_t)&irq,
        };
        struct kvm_device_attr init_attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr = KVM_ARM_VCPU_PMU_V3_INIT,
        };

        /* The test creates the vpmu_vm multiple times. Ensure a clean state */
        memset(&vpmu_vm, 0, sizeof(vpmu_vm));

        vpmu_vm.vm = vm_create(1);
        vm_init_descriptor_tables(vpmu_vm.vm);
        for (ec = 0; ec < ESR_EC_NUM; ec++) {
                vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
                                        guest_sync_handler);
        }

        /* Create vCPU with PMUv3 */
        vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
        init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
        vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
        vcpu_init_descriptor_tables(vpmu_vm.vcpu);
        vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
                                        GICD_BASE_GPA, GICR_BASE_GPA);
        __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
                       "Failed to create vgic-v3, skipping");

        /* Make sure that PMUv3 support is indicated in the ID register */
        vcpu_get_reg(vpmu_vm.vcpu,
                     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
        pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
        TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
                    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
                    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

        /* Initialize vPMU */
        vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
        vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
}

static void destroy_vpmu_vm(void)
{
        close(vpmu_vm.gic_fd);
        kvm_vm_free(vpmu_vm.vm);
}

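/*
 * Enter the guest with @pmcr_n as the expected PMCR_EL0.N value and
 * handle the resulting ucall (guest assertion failure or completion).
 */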
static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
{
        struct ucall uc;

        vcpu_args_set(vcpu, 1, pmcr_n);
        vcpu_run(vcpu);
        switch (get_ucall(vcpu, &uc)) {
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                break;
        default:
                TEST_FAIL("Unknown ucall %lu", uc.cmd);
                break;
        }
}

static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
        struct kvm_vcpu *vcpu;
        uint64_t pmcr, pmcr_orig;

        create_vpmu_vm(guest_code);
        vcpu = vpmu_vm.vcpu;

        vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
        pmcr = pmcr_orig;

        /*
         * Setting a value of PMCR.N that is larger than the host limit
         * should not modify the field, and the write should still return
         * success.
         */
        set_pmcr_n(&pmcr, pmcr_n);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
        vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);

        if (expect_fail)
                TEST_ASSERT(pmcr_orig == pmcr,
                            "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
                            pmcr, pmcr_n);
        else
                TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
                            "Failed to update PMCR.N to %lu (received: %lu)\n",
                            pmcr_n, get_pmcr_n(pmcr));
}

/*
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test.
 */
static void run_access_test(uint64_t pmcr_n)
{
        uint64_t sp;
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_init init;

        pr_debug("Test with pmcr_n %lu\n", pmcr_n);

        test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
        vcpu = vpmu_vm.vcpu;

        /* Save the initial sp to restore it later when re-running the guest */
        vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);

        run_vcpu(vcpu, pmcr_n);

        /*
         * Reset and re-initialize the vCPU, and run the guest code again to
         * check if PMCR_EL0.N is preserved.
         */
        vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
        init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
        aarch64_vcpu_setup(vcpu, &init);
        vcpu_init_descriptor_tables(vcpu);
        vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
        vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

        run_vcpu(vcpu, pmcr_n);

        destroy_vpmu_vm();
}

static struct pmreg_sets validity_check_reg_sets[] = {
        PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
        PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
        PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
};

/*
 * Create a VM, and check if KVM handles userspace accesses to the PMU
 * register sets in @validity_check_reg_sets[] correctly.
 */
static void run_pmregs_validity_test(uint64_t pmcr_n)
{
        int i;
        struct kvm_vcpu *vcpu;
        uint64_t set_reg_id, clr_reg_id, reg_val;
        uint64_t valid_counters_mask, max_counters_mask;

        test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
        vcpu = vpmu_vm.vcpu;

        valid_counters_mask = get_counters_mask(pmcr_n);
        max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);

        for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
                set_reg_id = validity_check_reg_sets[i].set_reg_id;
                clr_reg_id = validity_check_reg_sets[i].clr_reg_id;

                /*
                 * Test if the 'set' and 'clr' variants of the registers
                 * are initialized based on the number of valid counters.
                 */
                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
                            KVM_ARM64_SYS_REG(set_reg_id), reg_val);

                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
                            KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

                /*
                 * Using the 'set' variant, force-set the register to the
                 * max number of possible counters and test if KVM discards
                 * the bits for unimplemented counters as it should.
                 */
                vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);

                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
                            KVM_ARM64_SYS_REG(set_reg_id), reg_val);

                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
                            KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
        }

        destroy_vpmu_vm();
}

/*
 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
static void run_error_test(uint64_t pmcr_n)
{
        pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

        test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
        destroy_vpmu_vm();
}

/*
 * Return the default number of implemented PMU event counters excluding
 * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
 */
static uint64_t get_pmcr_n_limit(void)
{
        uint64_t pmcr;

        create_vpmu_vm(guest_code);
        vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
        destroy_vpmu_vm();
        return get_pmcr_n(pmcr);
}

int main(void)
{
        uint64_t i, pmcr_n;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));

        pmcr_n = get_pmcr_n_limit();
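        /* Test each valid PMCR_EL0.N value, from 0 up to the host's limit */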
        for (i = 0; i <= pmcr_n; i++) {
                run_access_test(i);
                run_pmregs_validity_test(i);
        }

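        /* Attempts to set PMCR_EL0.N larger than the host's limit should fail */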
        for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
                run_error_test(i);

        return 0;
}