// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};
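
/*
 * IA32_FIXED_CTR_CTRL holds one 4-bit control field per fixed counter
 * (bit 0: count in ring 0, bit 1: count in rings 1-3, bit 3: PMI on
 * overflow).  Only reprogram the counters whose field actually changed.
 */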
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register (IA32_PERF_GLOBAL_CTRL) is updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}
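
/*
 * Map a guest eventsel/unit_mask pair to a generic perf event id.
 * Returns PERF_COUNT_HW_MAX if the pair is not an architectural event
 * or if guest CPUID 0x0A.EBX marks the event as unavailable.
 */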
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}
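
/*
 * Map a fixed counter index to the generic perf event id of the
 * architectural event it counts (see fixed_pmc_events above).
 */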
static unsigned intel_find_fixed_event(int idx)
{
	if (idx >= ARRAY_SIZE(fixed_pmc_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* Check if a PMC is enabled by comparing its bit against global_ctrl. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
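
/*
 * Translate a global counter index (GP counters first, fixed counters
 * starting at INTEL_PMC_IDX_FIXED) back to its kvm_pmc.
 */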
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}
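
/*
 * Decode the RDPMC index: bit 30 selects the fixed counter set, the
 * low bits index into it.  *mask is narrowed to the counter's width.
 */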
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

	return &counters[idx];
}
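
/*
 * The global control/status MSRs only exist for PMU version 2 and
 * later; anything else must match a counter or eventsel MSR.
 */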
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}
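
/*
 * Look up the kvm_pmc backing an MSR, trying the fixed counters first
 * and then the GP eventsel and counter ranges.
 */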
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}
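
/* Read a PMU MSR; counter reads are masked to the emulated counter width. */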
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}
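
/*
 * Write a PMU MSR.  Writes that set reserved bits fall through and
 * return 1 so that the caller can inject #GP into the guest.
 */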
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}
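
/*
 * Recompute the PMU configuration from the guest's CPUID leaf 0x0A,
 * clamping counter counts to what host perf actually provides.  Called
 * when userspace sets the guest CPUID.
 */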
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (kvm_x86_ops->pt_supported())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
}
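
/* One-time setup of the GP and fixed counter arrays at vCPU creation. */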
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}
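
/* Stop all counters and clear counter and control state on vCPU reset. */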
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}
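
/* PMU ops dispatched from the common x86 PMU code (arch/x86/kvm/pmu.c). */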
struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};