// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
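
/*
 * Bank selector and per-counter index used by the MSR decoding below.
 * PMU_TYPE_* picks between the counter and the event-select MSR banks;
 * INDEX_* is the slot into pmu->gp_counters[], with INDEX_ERROR flagging
 * an MSR that names no counter.
 */
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};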
/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
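
/*
 * Return the base MSR of the requested bank.  Guests with PERFCTR_CORE
 * (Fam15h and later) use the six interleaved MSR_F15H_PERF_CTL/CTR pairs;
 * legacy guests fall back to the four contiguous K7 MSRs.
 */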
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}
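
/*
 * Map a counter or event-select MSR, in either the K7 or the Fam15h
 * encoding, to its zero-based slot in pmu->gp_counters[].
 */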
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}
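
/*
 * Validate that @msr names a PMC of the requested @type and translate it
 * to the backing kvm_pmc.  The Fam15h MSRs are only honored when the guest
 * has been given X86_FEATURE_PERFCTR_CORE; the K7 aliases always work but
 * only cover the first four counters.
 */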
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}
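
/*
 * Translate a guest event_select/unit_mask pair into a generic perf event
 * id via amd_event_mapping[], e.g. 0x76/0x00 -> PERF_COUNT_HW_CPU_CYCLES.
 * PERF_COUNT_HW_MAX tells the common PMU code there is no match, so the
 * event falls back to raw programming.
 */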
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
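
/*
 * RDPMC: only the general-purpose counter range is valid on AMD.  The two
 * high selector bits of ECX (which Intel uses to address fixed counters)
 * carry no meaning here and are cleared before the range check.
 */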
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}
/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}
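
/*
 * Counter writes adjust pmc->counter by the delta against the current
 * readout, so a running perf event keeps accumulating on top of the
 * guest-visible value.  Event-select writes that touch reserved bits are
 * rejected, and reprogramming is skipped when nothing changed.
 */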
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}
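
/*
 * Re-size the PMU model after a guest CPUID update: PERFCTR_CORE raises
 * the counter count from four (AMD64_NUM_COUNTERS) to six
 * (AMD64_NUM_COUNTERS_CORE).  AMD counters are 48 bits wide.
 */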
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fall out */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
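
/*
 * One-time vCPU setup: tag every potential general-purpose counter with
 * its type and index so the common PMU code can address it, whether or
 * not the guest's CPUID ends up exposing all six.
 */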
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE ; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}
struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};