KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0
arch/arm64/kvm/pmu-emul.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Linaro Ltd.
4  * Author: Shannon Zhao <shannon.zhao@linaro.org>
5  */
6
7 #include <linux/cpu.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 #include <linux/list.h>
11 #include <linux/perf_event.h>
12 #include <linux/perf/arm_pmu.h>
13 #include <linux/uaccess.h>
14 #include <asm/kvm_emulate.h>
15 #include <kvm/arm_pmu.h>
16 #include <kvm/arm_vgic.h>
17 #include <asm/arm_pmuv3.h>
18
19 #define PERF_ATTR_CFG1_COUNTER_64BIT    BIT(0)
20
21 DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
22
23 static LIST_HEAD(arm_pmus);
24 static DEFINE_MUTEX(arm_pmus_lock);
25
26 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
27 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
28
29 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
30 {
31         return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
32 }
33
34 static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
35 {
36         return &vcpu->arch.pmu.pmc[cnt_idx];
37 }
38
39 static u32 __kvm_pmu_event_mask(unsigned int pmuver)
40 {
41         switch (pmuver) {
42         case ID_AA64DFR0_EL1_PMUVer_IMP:
43                 return GENMASK(9, 0);
44         case ID_AA64DFR0_EL1_PMUVer_V3P1:
45         case ID_AA64DFR0_EL1_PMUVer_V3P4:
46         case ID_AA64DFR0_EL1_PMUVer_V3P5:
47         case ID_AA64DFR0_EL1_PMUVer_V3P7:
48                 return GENMASK(15, 0);
49         default:                /* Shouldn't be here, just for sanity */
50                 WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
51                 return 0;
52         }
53 }
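/*
 * Note on the masks above: baseline PMUv3 (PMUVer == IMP) defines a 10-bit
 * event number space, so guests can only program events 0-0x3ff, while
 * PMUv3.1 and later widen evtCount to 16 bits (events 0-0xffff). As an
 * illustration, an event number of 0x1234 written by the guest is
 * truncated to 0x234 when the VM is presented with a baseline PMUv3.
 */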
54
55 static u32 kvm_pmu_event_mask(struct kvm *kvm)
56 {
57         u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
58         u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
59
60         return __kvm_pmu_event_mask(pmuver);
61 }
62
63 /**
64  * kvm_pmc_is_64bit - determine if counter is 64bit
65  * @pmc: counter context
66  */
67 static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
68 {
69         return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
70                 kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
71 }
72
73 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
74 {
75         u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
76
77         return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
78                (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
79 }
80
81 static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
82 {
83         return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
84                 !kvm_pmc_has_64bit_overflow(pmc));
85 }
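/*
 * Chaining illustration: an even-numbered event counter that still wraps
 * at 32 bits can be paired with its odd neighbour. If counter 2 overflows,
 * for instance, kvm_pmu_counter_increment() bumps counter 3 provided it is
 * enabled and programmed with the CHAIN event, so the pair behaves like a
 * single 64-bit counter. Once PMCR_EL0.LP makes the even counter overflow
 * at 64 bits, chaining is unnecessary and the helper above returns false.
 */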
86
87 static u32 counter_index_to_reg(u64 idx)
88 {
89         return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
90 }
91
92 static u32 counter_index_to_evtreg(u64 idx)
93 {
94         return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
95 }
96
97 static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
98 {
99         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
100         u64 counter, reg, enabled, running;
101
102         reg = counter_index_to_reg(pmc->idx);
103         counter = __vcpu_sys_reg(vcpu, reg);
104
105         /*
106          * The real counter value is equal to the value of the counter
107          * register plus whatever the associated perf event has counted.
108          */
109         if (pmc->perf_event)
110                 counter += perf_event_read_value(pmc->perf_event, &enabled,
111                                                  &running);
112
113         if (!kvm_pmc_is_64bit(pmc))
114                 counter = lower_32_bits(counter);
115
116         return counter;
117 }
118
119 /**
120  * kvm_pmu_get_counter_value - get PMU counter value
121  * @vcpu: The vcpu pointer
122  * @select_idx: The counter index
123  */
124 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
125 {
126         if (!kvm_vcpu_has_pmu(vcpu))
127                 return 0;
128
129         return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
130 }
131
132 static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
133 {
134         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
135         u64 reg;
136
137         kvm_pmu_release_perf_event(pmc);
138
139         reg = counter_index_to_reg(pmc->idx);
140
141         if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
142             !force) {
143                 /*
144                  * Even with PMUv3p5, AArch32 cannot write to the top
145                  * 32bit of the counters. The only possible course of
146                  * action is to use PMCR.P, which will reset them to
147                  * 0 (the only use of the 'force' parameter).
148                  */
149                 val  = lower_32_bits(val);
150                 val |= __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
151         }
152
153         __vcpu_sys_reg(vcpu, reg) = val;
154
155         /* Recreate the perf event to reflect the updated sample_period */
156         kvm_pmu_create_perf_event(pmc);
157 }
158
159 /**
160  * kvm_pmu_set_counter_value - set PMU counter value
161  * @vcpu: The vcpu pointer
162  * @select_idx: The counter index
163  * @val: The counter value
164  */
165 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
166 {
167         if (!kvm_vcpu_has_pmu(vcpu))
168                 return;
169
170         kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
171 }
172
173 /**
174  * kvm_pmu_release_perf_event - remove the perf event
175  * @pmc: The PMU counter pointer
176  */
177 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
178 {
179         if (pmc->perf_event) {
180                 perf_event_disable(pmc->perf_event);
181                 perf_event_release_kernel(pmc->perf_event);
182                 pmc->perf_event = NULL;
183         }
184 }
185
186 /**
187  * kvm_pmu_stop_counter - stop PMU counter
188  * @pmc: The PMU counter pointer
189  *
190  * If this counter has been configured to monitor some event, release it here.
191  */
192 static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
193 {
194         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
195         u64 reg, val;
196
197         if (!pmc->perf_event)
198                 return;
199
200         val = kvm_pmu_get_pmc_value(pmc);
201
202         reg = counter_index_to_reg(pmc->idx);
203
204         __vcpu_sys_reg(vcpu, reg) = val;
205
206         kvm_pmu_release_perf_event(pmc);
207 }
208
209 /**
210  * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
211  * @vcpu: The vcpu pointer
212  *
213  */
214 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
215 {
216         int i;
217         struct kvm_pmu *pmu = &vcpu->arch.pmu;
218
219         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
220                 pmu->pmc[i].idx = i;
221 }
222
223 /**
224  * kvm_pmu_vcpu_reset - reset pmu state for cpu
225  * @vcpu: The vcpu pointer
226  *
227  */
228 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
229 {
230         unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
231         int i;
232
233         for_each_set_bit(i, &mask, 32)
234                 kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
235 }
236
237 /**
238  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
239  * @vcpu: The vcpu pointer
240  *
241  */
242 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
243 {
244         int i;
245
246         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
247                 kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
248         irq_work_sync(&vcpu->arch.pmu.overflow_work);
249 }
250
251 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
252 {
253         u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
254
255         val &= ARMV8_PMU_PMCR_N_MASK;
256         if (val == 0)
257                 return BIT(ARMV8_PMU_CYCLE_IDX);
258         else
259                 return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
260 }
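/*
 * Worked example: with PMCR_EL0.N == 6, the mask is GENMASK(5, 0) |
 * BIT(31) == 0x8000003f, i.e. event counters 0-5 plus the cycle counter
 * (index 31). With N == 0, only the cycle counter bit remains valid.
 */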
261
262 /**
263  * kvm_pmu_enable_counter_mask - enable selected PMU counters
264  * @vcpu: The vcpu pointer
265  * @val: the value guest writes to PMCNTENSET register
266  *
267  * Call perf_event_enable to start counting the perf event
268  */
269 void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
270 {
271         int i;
272         if (!kvm_vcpu_has_pmu(vcpu))
273                 return;
274
275         if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
276                 return;
277
278         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
279                 struct kvm_pmc *pmc;
280
281                 if (!(val & BIT(i)))
282                         continue;
283
284                 pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
285
286                 if (!pmc->perf_event) {
287                         kvm_pmu_create_perf_event(pmc);
288                 } else {
289                         perf_event_enable(pmc->perf_event);
290                         if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
291                                 kvm_debug("fail to enable perf event\n");
292                 }
293         }
294 }
295
296 /**
297  * kvm_pmu_disable_counter_mask - disable selected PMU counters
298  * @vcpu: The vcpu pointer
299  * @val: the value guest writes to PMCNTENCLR register
300  *
301  * Call perf_event_disable to stop counting the perf event
302  */
303 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
304 {
305         int i;
306
307         if (!kvm_vcpu_has_pmu(vcpu) || !val)
308                 return;
309
310         for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
311                 struct kvm_pmc *pmc;
312
313                 if (!(val & BIT(i)))
314                         continue;
315
316                 pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
317
318                 if (pmc->perf_event)
319                         perf_event_disable(pmc->perf_event);
320         }
321 }
322
323 static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
324 {
325         u64 reg = 0;
326
327         if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
328                 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
329                 reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
330                 reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
331         }
332
333         return reg;
334 }
335
336 static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
337 {
338         struct kvm_pmu *pmu = &vcpu->arch.pmu;
339         bool overflow;
340
341         if (!kvm_vcpu_has_pmu(vcpu))
342                 return;
343
344         overflow = !!kvm_pmu_overflow_status(vcpu);
345         if (pmu->irq_level == overflow)
346                 return;
347
348         pmu->irq_level = overflow;
349
350         if (likely(irqchip_in_kernel(vcpu->kvm))) {
351                 int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
352                                               pmu->irq_num, overflow, pmu);
353                 WARN_ON(ret);
354         }
355 }
356
357 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
358 {
359         struct kvm_pmu *pmu = &vcpu->arch.pmu;
360         struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
361         bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
362
363         if (likely(irqchip_in_kernel(vcpu->kvm)))
364                 return false;
365
366         return pmu->irq_level != run_level;
367 }
368
369 /*
370  * Reflect the PMU overflow interrupt output level into the kvm_run structure
371  */
372 void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
373 {
374         struct kvm_sync_regs *regs = &vcpu->run->s.regs;
375
376         /* Populate the timer bitmap for user space */
377         regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
378         if (vcpu->arch.pmu.irq_level)
379                 regs->device_irq_level |= KVM_ARM_DEV_PMU;
380 }
381
382 /**
383  * kvm_pmu_flush_hwstate - flush pmu state to cpu
384  * @vcpu: The vcpu pointer
385  *
386  * Check if the PMU has overflowed while we were running in the host, and inject
387  * an interrupt if that was the case.
388  */
389 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
390 {
391         kvm_pmu_update_state(vcpu);
392 }
393
394 /**
395  * kvm_pmu_sync_hwstate - sync pmu state from cpu
396  * @vcpu: The vcpu pointer
397  *
398  * Check if the PMU has overflowed while we were running in the guest, and
399  * inject an interrupt if that was the case.
400  */
401 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
402 {
403         kvm_pmu_update_state(vcpu);
404 }
405
406 /*
407  * When the perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
408  * to the event.
409  * This is why we need a callback to do it once outside of the NMI context.
410  */
411 static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
412 {
413         struct kvm_vcpu *vcpu;
414
415         vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
416         kvm_vcpu_kick(vcpu);
417 }
418
419 /*
420  * Perform an increment on any of the counters described in @mask,
421  * generating the overflow if required, and propagate it as a chained
422  * event if possible.
423  */
424 static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
425                                       unsigned long mask, u32 event)
426 {
427         int i;
428
429         if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
430                 return;
431
432         /* Weed out disabled counters */
433         mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
434
435         for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
436                 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
437                 u64 type, reg;
438
439                 /* Filter on event type */
440                 type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
441                 type &= kvm_pmu_event_mask(vcpu->kvm);
442                 if (type != event)
443                         continue;
444
445                 /* Increment this counter */
446                 reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
447                 if (!kvm_pmc_is_64bit(pmc))
448                         reg = lower_32_bits(reg);
449                 __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
450
451                 /* No overflow? move on */
452                 if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
453                         continue;
454
455                 /* Mark overflow */
456                 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
457
458                 if (kvm_pmu_counter_can_chain(pmc))
459                         kvm_pmu_counter_increment(vcpu, BIT(i + 1),
460                                                   ARMV8_PMUV3_PERFCTR_CHAIN);
461         }
462 }
463
464 /* Compute the sample period for a given counter value */
465 static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
466 {
467         u64 val;
468
469         if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
470                 val = (-counter) & GENMASK(63, 0);
471         else
472                 val = (-counter) & GENMASK(31, 0);
473
474         return val;
475 }
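/*
 * Worked example: a 32-bit counter programmed to 0xfffffff0 yields a
 * sample period of (-0xfffffff0) & GENMASK(31, 0) == 0x10, i.e. the perf
 * event fires after 16 more increments, exactly when the architectural
 * counter would overflow. The 64-bit case is analogous, with the negation
 * taken modulo 2^64.
 */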
476
477 /*
478  * When the perf event overflows, set the overflow status and inform the vcpu.
479  */
480 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
481                                   struct perf_sample_data *data,
482                                   struct pt_regs *regs)
483 {
484         struct kvm_pmc *pmc = perf_event->overflow_handler_context;
485         struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
486         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
487         int idx = pmc->idx;
488         u64 period;
489
490         cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
491
492         /*
493          * Reset the sample period to the architectural limit,
494          * i.e. the point where the counter overflows.
495          */
496         period = compute_period(pmc, local64_read(&perf_event->count));
497
498         local64_set(&perf_event->hw.period_left, 0);
499         perf_event->attr.sample_period = period;
500         perf_event->hw.sample_period = period;
501
502         __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
503
504         if (kvm_pmu_counter_can_chain(pmc))
505                 kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
506                                           ARMV8_PMUV3_PERFCTR_CHAIN);
507
508         if (kvm_pmu_overflow_status(vcpu)) {
509                 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
510
511                 if (!in_nmi())
512                         kvm_vcpu_kick(vcpu);
513                 else
514                         irq_work_queue(&vcpu->arch.pmu.overflow_work);
515         }
516
517         cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
518 }
519
520 /**
521  * kvm_pmu_software_increment - do software increment
522  * @vcpu: The vcpu pointer
523  * @val: the value guest writes to PMSWINC register
524  */
525 void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
526 {
527         kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
528 }
529
530 /**
531  * kvm_pmu_handle_pmcr - handle PMCR register
532  * @vcpu: The vcpu pointer
533  * @val: the value guest writes to PMCR register
534  */
535 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
536 {
537         int i;
538
539         if (!kvm_vcpu_has_pmu(vcpu))
540                 return;
541
542         /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
543         if (!kvm_pmu_is_3p5(vcpu))
544                 val &= ~ARMV8_PMU_PMCR_LP;
545
546         /* The reset bits don't indicate any state, and shouldn't be saved. */
547         __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
548
549         if (val & ARMV8_PMU_PMCR_E) {
550                 kvm_pmu_enable_counter_mask(vcpu,
551                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
552         } else {
553                 kvm_pmu_disable_counter_mask(vcpu,
554                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
555         }
556
557         if (val & ARMV8_PMU_PMCR_C)
558                 kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
559
560         if (val & ARMV8_PMU_PMCR_P) {
561                 unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
562                 mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
563                 for_each_set_bit(i, &mask, 32)
564                         kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
565         }
566         kvm_vcpu_pmu_restore_guest(vcpu);
567 }
568
569 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
570 {
571         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
572         return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
573                (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
574 }
575
576 /**
577  * kvm_pmu_create_perf_event - create a perf event for a counter
578  * @pmc: Counter context
579  */
580 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
581 {
582         struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
583         struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
584         struct perf_event *event;
585         struct perf_event_attr attr;
586         u64 eventsel, reg, data;
587
588         reg = counter_index_to_evtreg(pmc->idx);
589         data = __vcpu_sys_reg(vcpu, reg);
590
591         kvm_pmu_stop_counter(pmc);
592         if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
593                 eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
594         else
595                 eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
596
597         /*
598          * Neither SW increment nor chained events need to be backed
599          * by a perf event.
600          */
601         if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
602             eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
603                 return;
604
605         /*
606          * If we have a filter in place and the event isn't allowed, do
607          * not install a perf event either.
608          */
609         if (vcpu->kvm->arch.pmu_filter &&
610             !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
611                 return;
612
613         memset(&attr, 0, sizeof(struct perf_event_attr));
614         attr.type = arm_pmu->pmu.type;
615         attr.size = sizeof(attr);
616         attr.pinned = 1;
617         attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
618         attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
619         attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
620         attr.exclude_hv = 1; /* Don't count EL2 events */
621         attr.exclude_host = 1; /* Don't count host events */
622         attr.config = eventsel;
623
624         /*
625          * If counting with a 64bit counter, advertise it to the perf
626          * code, carefully dealing with the initial sample period
627          * which also depends on the overflow.
628          */
629         if (kvm_pmc_is_64bit(pmc))
630                 attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
631
632         attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));
633
634         event = perf_event_create_kernel_counter(&attr, -1, current,
635                                                  kvm_pmu_perf_overflow, pmc);
636
637         if (IS_ERR(event)) {
638                 pr_err_once("kvm: pmu event creation failed %ld\n",
639                             PTR_ERR(event));
640                 return;
641         }
642
643         pmc->perf_event = event;
644 }
645
646 /**
647  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
648  * @vcpu: The vcpu pointer
649  * @data: The data guest writes to PMXEVTYPER_EL0
650  * @select_idx: The number of selected counter
651  *
652  * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count
653  * an event with the given hardware event number. Here we call the perf_event
654  * API to emulate this action and create a kernel perf event for it.
655  */
656 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
657                                     u64 select_idx)
658 {
659         struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
660         u64 reg, mask;
661
662         if (!kvm_vcpu_has_pmu(vcpu))
663                 return;
664
665         mask  =  ARMV8_PMU_EVTYPE_MASK;
666         mask &= ~ARMV8_PMU_EVTYPE_EVENT;
667         mask |= kvm_pmu_event_mask(vcpu->kvm);
668
669         reg = counter_index_to_evtreg(pmc->idx);
670
671         __vcpu_sys_reg(vcpu, reg) = data & mask;
672
673         kvm_pmu_create_perf_event(pmc);
674 }
675
676 void kvm_host_pmu_init(struct arm_pmu *pmu)
677 {
678         struct arm_pmu_entry *entry;
679
680         /*
681          * Check the sanitised PMU version for the system, as KVM does not
682          * support implementations where PMUv3 exists on a subset of CPUs.
683          */
684         if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
685                 return;
686
687         mutex_lock(&arm_pmus_lock);
688
689         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
690         if (!entry)
691                 goto out_unlock;
692
693         entry->arm_pmu = pmu;
694         list_add_tail(&entry->entry, &arm_pmus);
695
696         if (list_is_singular(&arm_pmus))
697                 static_branch_enable(&kvm_arm_pmu_available);
698
699 out_unlock:
700         mutex_unlock(&arm_pmus_lock);
701 }
702
703 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
704 {
705         struct arm_pmu *tmp, *pmu = NULL;
706         struct arm_pmu_entry *entry;
707         int cpu;
708
709         mutex_lock(&arm_pmus_lock);
710
711         /*
712          * It is safe to use a stale cpu to iterate the list of PMUs so long as
713          * the same value is used for the entirety of the loop. Given this, and
714          * the fact that no percpu data is used for the lookup there is no need
715          * to disable preemption.
716          *
717          * It is still necessary to get a valid cpu, though, to probe for the
718          * default PMU instance as userspace is not required to specify a PMU
719          * type. In order to uphold the preexisting behavior KVM selects the
720          * PMU instance for the core during vcpu init. A dependent use
721          * case would be a user with disdain of all things big.LITTLE that
722          * affines the VMM to a particular cluster of cores.
723          *
724          * In any case, userspace should just do the sane thing and use the UAPI
725          * to select a PMU type directly. But, be wary of the baggage being
726          * carried here.
727          */
728         cpu = raw_smp_processor_id();
729         list_for_each_entry(entry, &arm_pmus, entry) {
730                 tmp = entry->arm_pmu;
731
732                 if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
733                         pmu = tmp;
734                         break;
735                 }
736         }
737
738         mutex_unlock(&arm_pmus_lock);
739
740         return pmu;
741 }
742
743 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
744 {
745         unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
746         u64 val, mask = 0;
747         int base, i, nr_events;
748
749         if (!kvm_vcpu_has_pmu(vcpu))
750                 return 0;
751
752         if (!pmceid1) {
753                 val = read_sysreg(pmceid0_el0);
754                 /* always support CHAIN */
755                 val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
756                 base = 0;
757         } else {
758                 val = read_sysreg(pmceid1_el0);
759                 /*
760                  * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
761                  * as RAZ
762                  */
763                 val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
764                          BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
765                          BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
766                 base = 32;
767         }
768
769         if (!bmap)
770                 return val;
771
772         nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
773
774         for (i = 0; i < 32; i += 8) {
775                 u64 byte;
776
777                 byte = bitmap_get_value8(bmap, base + i);
778                 mask |= byte << i;
779                 if (nr_events >= (0x4000 + base + 32)) {
780                         byte = bitmap_get_value8(bmap, 0x4000 + base + i);
781                         mask |= byte << (32 + i);
782                 }
783         }
784
785         return val & mask;
786 }
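/*
 * Layout reminder for the loop above: the lower 32 bits of PMCEID{0,1}_EL0
 * describe common events 'base' to 'base + 31', while the upper 32 bits
 * describe the extended range starting at 0x4000 + base. The filter bitmap
 * is therefore sampled 8 bits at a time from both ranges to build the mask
 * applied to the host's PMCEID value.
 */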
787
788 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
789 {
790         if (!kvm_vcpu_has_pmu(vcpu))
791                 return 0;
792
793         if (!vcpu->arch.pmu.created)
794                 return -EINVAL;
795
796         /*
797          * A valid interrupt configuration for the PMU is either to have a
798          * properly configured interrupt number and using an in-kernel
799          * irqchip, or to not have an in-kernel GIC and not set an IRQ.
800          */
801         if (irqchip_in_kernel(vcpu->kvm)) {
802                 int irq = vcpu->arch.pmu.irq_num;
803                 /*
804                  * If we are using an in-kernel vgic, at this point we know
805                  * the vgic will be initialized, so we can check the PMU irq
806                  * number against the dimensions of the vgic and make sure
807                  * it's valid.
808                  */
809                 if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
810                         return -EINVAL;
811         } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
812                    return -EINVAL;
813         }
814
815         /* One-off reload of the PMU on first run */
816         kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
817
818         return 0;
819 }
820
821 static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
822 {
823         if (irqchip_in_kernel(vcpu->kvm)) {
824                 int ret;
825
826                 /*
827                  * If using the PMU with an in-kernel virtual GIC
828                  * implementation, we require the GIC to be already
829                  * initialized when initializing the PMU.
830                  */
831                 if (!vgic_initialized(vcpu->kvm))
832                         return -ENODEV;
833
834                 if (!kvm_arm_pmu_irq_initialized(vcpu))
835                         return -ENXIO;
836
837                 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
838                                          &vcpu->arch.pmu);
839                 if (ret)
840                         return ret;
841         }
842
843         init_irq_work(&vcpu->arch.pmu.overflow_work,
844                       kvm_pmu_perf_overflow_notify_vcpu);
845
846         vcpu->arch.pmu.created = true;
847         return 0;
848 }
849
850 /*
851  * For one VM the interrupt type must be the same for each vcpu.
852  * As a PPI, the interrupt number is the same for all vcpus,
853  * while as an SPI it must be a separate number per vcpu.
854  */
855 static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
856 {
857         unsigned long i;
858         struct kvm_vcpu *vcpu;
859
860         kvm_for_each_vcpu(i, vcpu, kvm) {
861                 if (!kvm_arm_pmu_irq_initialized(vcpu))
862                         continue;
863
864                 if (irq_is_ppi(irq)) {
865                         if (vcpu->arch.pmu.irq_num != irq)
866                                 return false;
867                 } else {
868                         if (vcpu->arch.pmu.irq_num == irq)
869                                 return false;
870                 }
871         }
872
873         return true;
874 }
875
876 static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
877 {
878         lockdep_assert_held(&kvm->arch.config_lock);
879
880         kvm->arch.arm_pmu = arm_pmu;
881 }
882
883 /**
884  * kvm_arm_set_default_pmu - No PMU set, get the default one.
885  * @kvm: The kvm pointer
886  *
887  * The observant among you will notice that the supported_cpus
888  * mask does not get updated for the default PMU even though it
889  * is quite possible the selected instance supports only a
890  * subset of cores in the system. This is intentional, and
891  * upholds the preexisting behavior on heterogeneous systems
892  * where vCPUs can be scheduled on any core but the guest
893  * counters could stop working.
894  */
895 int kvm_arm_set_default_pmu(struct kvm *kvm)
896 {
897         struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
898
899         if (!arm_pmu)
900                 return -ENODEV;
901
902         kvm_arm_set_pmu(kvm, arm_pmu);
903         return 0;
904 }
905
906 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
907 {
908         struct kvm *kvm = vcpu->kvm;
909         struct arm_pmu_entry *entry;
910         struct arm_pmu *arm_pmu;
911         int ret = -ENXIO;
912
913         lockdep_assert_held(&kvm->arch.config_lock);
914         mutex_lock(&arm_pmus_lock);
915
916         list_for_each_entry(entry, &arm_pmus, entry) {
917                 arm_pmu = entry->arm_pmu;
918                 if (arm_pmu->pmu.type == pmu_id) {
919                         if (kvm_vm_has_ran_once(kvm) ||
920                             (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
921                                 ret = -EBUSY;
922                                 break;
923                         }
924
925                         kvm_arm_set_pmu(kvm, arm_pmu);
926                         cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
927                         ret = 0;
928                         break;
929                 }
930         }
931
932         mutex_unlock(&arm_pmus_lock);
933         return ret;
934 }
935
936 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
937 {
938         struct kvm *kvm = vcpu->kvm;
939
940         lockdep_assert_held(&kvm->arch.config_lock);
941
942         if (!kvm_vcpu_has_pmu(vcpu))
943                 return -ENODEV;
944
945         if (vcpu->arch.pmu.created)
946                 return -EBUSY;
947
948         switch (attr->attr) {
949         case KVM_ARM_VCPU_PMU_V3_IRQ: {
950                 int __user *uaddr = (int __user *)(long)attr->addr;
951                 int irq;
952
953                 if (!irqchip_in_kernel(kvm))
954                         return -EINVAL;
955
956                 if (get_user(irq, uaddr))
957                         return -EFAULT;
958
959                 /* The PMU overflow interrupt can be a PPI or a valid SPI. */
960                 if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
961                         return -EINVAL;
962
963                 if (!pmu_irq_is_valid(kvm, irq))
964                         return -EINVAL;
965
966                 if (kvm_arm_pmu_irq_initialized(vcpu))
967                         return -EBUSY;
968
969                 kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
970                 vcpu->arch.pmu.irq_num = irq;
971                 return 0;
972         }
973         case KVM_ARM_VCPU_PMU_V3_FILTER: {
974                 u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
975                 struct kvm_pmu_event_filter __user *uaddr;
976                 struct kvm_pmu_event_filter filter;
977                 int nr_events;
978
979                 /*
980                  * Allow userspace to specify an event filter for the entire
981                  * event range supported by the hardware's PMUVer, rather
982                  * than the guest's PMUVer, for KVM backward compatibility.
983                  */
984                 nr_events = __kvm_pmu_event_mask(pmuver) + 1;
985
986                 uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
987
988                 if (copy_from_user(&filter, uaddr, sizeof(filter)))
989                         return -EFAULT;
990
991                 if (((u32)filter.base_event + filter.nevents) > nr_events ||
992                     (filter.action != KVM_PMU_EVENT_ALLOW &&
993                      filter.action != KVM_PMU_EVENT_DENY))
994                         return -EINVAL;
995
996                 if (kvm_vm_has_ran_once(kvm))
997                         return -EBUSY;
998
999                 if (!kvm->arch.pmu_filter) {
1000                         kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
1001                         if (!kvm->arch.pmu_filter)
1002                                 return -ENOMEM;
1003
1004                         /*
1005                          * The default depends on the first applied filter.
1006                          * If it allows events, the default is to deny.
1007                          * Conversely, if the first filter denies a set of
1008                          * events, the default is to allow.
1009                          */
1010                         if (filter.action == KVM_PMU_EVENT_ALLOW)
1011                                 bitmap_zero(kvm->arch.pmu_filter, nr_events);
1012                         else
1013                                 bitmap_fill(kvm->arch.pmu_filter, nr_events);
1014                 }
1015
1016                 if (filter.action == KVM_PMU_EVENT_ALLOW)
1017                         bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1018                 else
1019                         bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1020
1021                 return 0;
1022         }
1023         case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
1024                 int __user *uaddr = (int __user *)(long)attr->addr;
1025                 int pmu_id;
1026
1027                 if (get_user(pmu_id, uaddr))
1028                         return -EFAULT;
1029
1030                 return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
1031         }
1032         case KVM_ARM_VCPU_PMU_V3_INIT:
1033                 return kvm_arm_pmu_v3_init(vcpu);
1034         }
1035
1036         return -ENXIO;
1037 }
1038
1039 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1040 {
1041         switch (attr->attr) {
1042         case KVM_ARM_VCPU_PMU_V3_IRQ: {
1043                 int __user *uaddr = (int __user *)(long)attr->addr;
1044                 int irq;
1045
1046                 if (!irqchip_in_kernel(vcpu->kvm))
1047                         return -EINVAL;
1048
1049                 if (!kvm_vcpu_has_pmu(vcpu))
1050                         return -ENODEV;
1051
1052                 if (!kvm_arm_pmu_irq_initialized(vcpu))
1053                         return -ENXIO;
1054
1055                 irq = vcpu->arch.pmu.irq_num;
1056                 return put_user(irq, uaddr);
1057         }
1058         }
1059
1060         return -ENXIO;
1061 }
1062
1063 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1064 {
1065         switch (attr->attr) {
1066         case KVM_ARM_VCPU_PMU_V3_IRQ:
1067         case KVM_ARM_VCPU_PMU_V3_INIT:
1068         case KVM_ARM_VCPU_PMU_V3_FILTER:
1069         case KVM_ARM_VCPU_PMU_V3_SET_PMU:
1070                 if (kvm_vcpu_has_pmu(vcpu))
1071                         return 0;
1072         }
1073
1074         return -ENXIO;
1075 }
1076
1077 u8 kvm_arm_pmu_get_pmuver_limit(void)
1078 {
1079         u64 tmp;
1080
1081         tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1082         tmp = cpuid_feature_cap_perfmon_field(tmp,
1083                                               ID_AA64DFR0_EL1_PMUVer_SHIFT,
1084                                               ID_AA64DFR0_EL1_PMUVer_V3P5);
1085         return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
1086 }
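/*
 * Note that cpuid_feature_cap_perfmon_field() both treats an IMP_DEF PMU
 * (PMUVer == 0xf) as not implemented and caps the field, so the limit
 * reported here never exceeds PMUv3.5 even on hosts implementing a newer
 * PMU version.
 */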
1087
1088 /**
1089  * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
1090  * @vcpu: The vcpu pointer
 *
 * Return: the emulated PMCR_EL0 value stored for this vCPU.
1091  */
1092 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
1093 {
1094         return __vcpu_sys_reg(vcpu, PMCR_EL0);
1095 }
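/*
 * Usage sketch (mirroring existing callers in this file): PMU emulation
 * code reads the guest's PMCR_EL0 through this helper rather than
 * open-coding the sysreg access, e.g.:
 *
 *	if (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)
 *		... the guest has globally enabled its counters ...
 */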