// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
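
/*
 * Note: config1 bit 0 on the backing perf event asks the host ARMv8 PMU
 * driver for a 64-bit (chained) hardware event covering a pair of guest
 * counters.
 */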
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
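
/*
 * A counter's pmc sits in the pmc[] array embedded in struct kvm_pmu, which
 * is itself embedded in struct kvm_vcpu_arch, so the owning vcpu can be
 * recovered with container_of() once the pointer is rewound to pmc[0].
 */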
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is the high (odd) counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}

static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;
	else
		return pmc + 1;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;
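
	/*
	 * A chained pair presents a single 64-bit value to the guest: the
	 * even counter holds the low 32 bits and the odd counter the high
	 * 32 bits.
	 */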
	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
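	/*
	 * The guest-visible value is the stored register plus whatever the
	 * backing perf event has counted; adjust the stored part by the
	 * difference so the counter reads back as @val.
	 */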
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	if (kvm_pmu_pmc_is_chained(pmc))
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
}
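
/*
 * Return the bitmask of counters the guest may use: event counters
 * 0..PMCR_EL0.N-1 plus the cycle counter.
 */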
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device interrupt level for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = -(local64_read(&perf_event->count));

	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
		period &= GENMASK(31, 0);

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
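
	/*
	 * If this counter's overflow can raise the PMU interrupt (the counter
	 * is enabled and its interrupt unmasked), kick the vcpu so the
	 * pending state is synchronized with the vgic on the next entry.
	 */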
	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		u64 type, reg;

		if (!(val & BIT(i)))
			continue;

		/* PMSWINC only applies to ... SW_INC! */
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
		type &= ARMV8_PMU_EVTYPE_EVENT;
		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
			continue;

		/* increment this even SW_INC counter */
		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
		reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

		if (reg) /* no overflow on the low part */
			continue;

		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
			/* increment the high counter */
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
			if (!reg) /* mark overflow on the high counter */
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
		} else {
			/* mark overflow on low counter */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment events don't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
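
	/*
	 * Program the sample period so the perf event overflows exactly when
	 * the emulated counter would: the period is the distance from the
	 * current guest counter value to the point of overflow.
	 */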
	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc)) {
		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);
		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
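
		/*
		 * Pass the high/odd counter as the overflow context so the
		 * overflow is reported against the odd index.
		 */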
		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register and the enable state of the odd register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
	bool new_state, old_state;

	old_state = kvm_pmu_pmc_is_chained(pmc);
	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

	if (old_state == new_state)
		return;

	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
	kvm_pmu_stop_counter(vcpu, canonical_pmc);
	if (new_state) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed.
		 */
		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
		return;
	}
	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = event_type;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}