// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);

static const struct x86_cpu_id vmx_icl_pebs_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	{}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn and, for families 15H and later,
 *      MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *      aliased to MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This normally is used by the RDPMC instruction.
 *      For instance, AMD's RDPMC instruction uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index
 *      into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping relationship
 *      between pmc and perf counters is as follows:
 *      * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *               and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */

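/*
 * Illustrative sketch of the mapping above (not used by KVM), assuming an
 * Intel vPMU with 8 gp counters and 3 fixed counters; the fixed-counter
 * select bit of the RDPMC idx (ECX bit 30) is architectural:
 *
 *	gp counter N    => pmc->idx == N                         (0..7)
 *	fixed counter N => pmc->idx == INTEL_PMC_IDX_FIXED + N   (32..34)
 *
 *	// Hypothetical helper, for illustration only:
 *	static void example_decode_intel_rdpmc_idx(unsigned int idx,
 *						   bool *fixed, unsigned int *ctr)
 *	{
 *		*fixed = idx & (1u << 30);	// ECX bit 30 selects fixed
 *		*ctr = idx & ~(1u << 30);	// remaining bits: counter number
 *	}
 */
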
static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,		     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

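/*
 * For reference, a sketch of what the X-macro above expands to for one op
 * listed in asm/kvm-x86-pmu-ops.h, e.g. "refresh":
 *
 *	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_refresh,
 *				*(((struct kvm_pmu_ops *)0)->refresh));
 *
 * i.e. one NULL-initialized static call per op, typed after the matching
 * kvm_pmu_ops member; kvm_pmu_ops_update() below patches every call site
 * to the vendor (Intel or AMD) implementation.
 */
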
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	bool skip_pmi = false;

	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
		if (!in_pmi) {
			/*
			 * TODO: KVM is currently _choosing_ to not generate
			 * records for emulated instructions, avoiding a
			 * BUFFER_OVF PMI when there are no records. Strictly
			 * speaking, records should be generated in the right
			 * context as well, to improve sampling accuracy.
			 */
			skip_pmi = true;
		} else {
			/* Indicate PEBS overflow PMI to guest. */
			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
						      (unsigned long *)&pmu->global_status);
		}
	} else {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	}

	if (!pmc->intr || skip_pmi)
		return;

	/*
	 * Inject PMI. If the vcpu was in guest mode when the NMI arrived,
	 * the PMI can be injected on guest mode re-entry. Otherwise we can't
	 * be sure the vcpu wasn't executing a hlt instruction at the time of
	 * the vmexit, in which case it won't re-enter guest mode until woken
	 * up. So we should wake it, but that is impossible from NMI context.
	 * Do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

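/*
 * For reference (architectural layout, not KVM-specific): a counter
 * overflow sets bit pmc->idx in the guest's IA32_PERF_GLOBAL_STATUS image,
 * while a PEBS buffer overflow sets GLOBAL_STATUS_BUFFER_OVF_BIT (bit 62):
 *
 *	pmu->global_status |= BIT_ULL(pmc->idx);	// counter overflow
 *	pmu->global_status |= BIT_ULL(62);		// PEBS buffer overflow
 */
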
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	/*
	 * Ignore overflow events for counters that are scheduled to be
	 * reprogrammed, e.g. if a PMI for the previous event races with KVM's
	 * handling of a related guest WRMSR.
	 */
	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
		return;

	__kvm_perf_overflow(pmc, true);

	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
				 bool exclude_user, bool exclude_kernel,
				 bool intr)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu)) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
	}
	if (pebs) {
		/*
		 * A non-zero precision level turns an ordinary guest event
		 * into a guest PEBS event, which triggers the host PEBS PMI
		 * handler to determine whether the PEBS overflow PMI comes
		 * from the host counters or the guest.
		 *
		 * For most PEBS hardware events, the difference in the
		 * software precision levels of guest and host PEBS events
		 * will not affect the accuracy of the PEBS profiling result,
		 * because the "event IP" in the PEBS record is calibrated on
		 * the guest side.
		 *
		 * On Icelake everything is fine. Other hardware (GLC+, TNT+)
		 * that could possibly care here is unsupported and needs
		 * changes.
		 */
		attr.precise_ip = 1;
		/* The PDIR-capable counter, i.e. fixed counter 0 (idx 32). */
		if (x86_match_cpu(vmx_icl_pebs_cpu) && pmc->idx == 32)
			attr.precise_ip = 3;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return PTR_ERR(event);
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	pmc->is_paused = false;
	pmc->intr = intr || pebs;
	return 0;
}

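/*
 * For reference: get_sample_period() (pmu.h) derives the perf sample period
 * from the guest's current counter value so that the host event overflows
 * exactly when the guest counter would. A minimal sketch, assuming a 48-bit
 * counter:
 *
 *	pmc->counter  = 0xfffffffffffe;			// overflow in 2 events
 *	sample_period = (-pmc->counter) & pmc_bitmask(pmc);	// == 2
 */
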
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* Update the counter; reset the event value to avoid redundant accumulation. */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* Recalibrate the sample period and check if it's accepted by the perf core. */
	if (is_sampling_event(pmc->perf_event) &&
	    perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
	    (!!pmc->perf_event->attr.precise_ip))
		return false;

	/* Reuse the perf_event, as pmc_reprogram_counter() does. */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	return true;
}

static int cmp_u64(const void *pa, const void *pb)
{
	u64 a = *(u64 *)pa;
	u64 b = *(u64 *)pb;

	/* Avoid "a - b", which can overflow when truncated to int. */
	return (a > b) - (a < b);
}

static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;
	bool allow_event = true;
	__u64 key;
	int idx;

	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
		return false;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (!filter)
		goto out;

	if (pmc_is_gp(pmc)) {
		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
		if (bsearch(&key, filter->events, filter->nevents,
			    sizeof(__u64), cmp_u64))
			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
		else
			allow_event = filter->action == KVM_PMU_EVENT_DENY;
	} else {
		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			allow_event = false;
	}

out:
	return allow_event;
}

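/*
 * Example of the gp lookup key above: AMD64_RAW_EVENT_MASK_NB keeps only
 * the event select and unit mask, dropping the enable/CPL/edge/invert/cmask
 * control bits. E.g. an eventsel of 0x5300c0 (event 0xc0, USR+OS+INT,
 * enabled) yields key 0xc0, which is then binary-searched in the sorted
 * filter->events[] list.
 */
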
static void reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 eventsel = pmc->eventsel;
	u64 new_config = eventsel;
	u8 fixed_ctr_ctrl;

	pmc_pause_counter(pmc);

	if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc))
		goto reprogram_complete;

	if (!check_pmu_event_filter(pmc))
		goto reprogram_complete;

	if (pmc->counter < pmc->prev_counter)
		__kvm_perf_overflow(pmc, false);

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	if (pmc_is_fixed(pmc)) {
		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
						  pmc->idx - INTEL_PMC_IDX_FIXED);
		if (fixed_ctr_ctrl & 0x1)
			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
		if (fixed_ctr_ctrl & 0x2)
			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
		if (fixed_ctr_ctrl & 0x8)
			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
		new_config = (u64)fixed_ctr_ctrl;
	}

	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
		goto reprogram_complete;

	pmc_release_perf_event(pmc);

	pmc->current_config = new_config;

	/*
	 * If reprogramming fails, e.g. due to contention, leave the counter's
	 * reprogram bit set, i.e. opportunistically try again on the next PMU
	 * refresh. Don't make a new request as doing so can stall the guest
	 * if reprogramming repeatedly fails.
	 */
	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
				  (eventsel & pmu->raw_event_mask),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
		return;

reprogram_complete:
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->prev_counter = 0;
}

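/*
 * For reference: fixed_ctrl_field() (pmu.h) extracts the 4-bit control
 * field for one fixed counter from the IA32_FIXED_CTR_CTRL image. E.g.
 * with fixed_ctr_ctrl == 0xb0, counter 1's field is 0xb, i.e. count in
 * ring 0 (0x1) and ring 3 (0x2) with PMI on overflow (0x8), which the
 * code above folds into the equivalent eventsel bits.
 */
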
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

		if (unlikely(!pmc)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmc);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* Check if idx is a valid index to access the PMU. */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

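/*
 * Example of the fast-mode encoding above: a guest that executes RDPMC
 * with ECX[31] set asks for a "fast" read, which returns only the low
 * 32 bits of the counter:
 *
 *	idx  = 0x80000001;	// fast-mode read of counter 1
 *	mask = ~0u;		// counter value truncated to 32 bits
 */
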
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
}

/*
 * Refresh the PMU configuration. This is generally called when the
 * underlying settings change (e.g. the vCPU's PMU-related CPUID is
 * updated), which should happen rarely.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	static_call(kvm_x86_pmu_refresh)(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	static_call(kvm_x86_pmu_reset)(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	static_call(kvm_x86_pmu_init)(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	pmc->prev_counter = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
	kvm_pmu_request_counter_reprogam(pmc);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
		AMD64_RAW_EVENT_MASK_NB);
}

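/*
 * The XOR+mask above compares only the event select and unit mask of the
 * guest's eventsel with perf's encoding of perf_hw_id; control bits
 * (enable, CPL, edge, invert, cmask) are deliberately ignored. E.g. both
 * 0x4300c0 and 0x5300c0 match an encoding of 0xc0.
 */
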
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config;

	if (pmc_is_gp(pmc)) {
		config = pmc->eventsel;
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
					  pmc->idx - INTEL_PMC_IDX_FIXED);
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

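/*
 * Usage sketch: KVM's emulator calls this when it emulates an instruction,
 * so that vPMCs programmed to count e.g. retired instructions also observe
 * the emulated ones:
 *
 *	kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
 */
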
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	/*
	 * Sort the in-kernel list so that we can search it with bsearch.
	 */
	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	synchronize_srcu_expedited(&kvm->srcu);

	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

	kvm_for_each_vcpu(i, vcpu, kvm)
		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

	mutex_unlock(&kvm->lock);

	r = 0;
cleanup:
	kfree(filter);
	return r;
}
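
/*
 * Userspace usage sketch for the ioctl above (illustrative, not kernel
 * code); the event keys are hypothetical:
 *
 *	size_t sz = sizeof(struct kvm_pmu_event_filter) + 2 * sizeof(__u64);
 *	struct kvm_pmu_event_filter *f = calloc(1, sz);
 *
 *	f->action    = KVM_PMU_EVENT_DENY;
 *	f->nevents   = 2;
 *	f->events[0] = 0xc0;	// e.g. retired instructions
 *	f->events[1] = 0x2e;	// e.g. LLC references
 *	ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
 */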