arch/x86/kvm/svm/pmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

enum index {
        INDEX_ZERO = 0,
        INDEX_ONE,
        INDEX_TWO,
        INDEX_THREE,
        INDEX_FOUR,
        INDEX_FIVE,
        INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
        [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
        [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

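/*
 * Return the base MSR of the counter or event select bank: the
 * PERFCTR_CORE MSRs when the guest enumerates them, the legacy K7 MSRs
 * otherwise.
 */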
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_F15H_PERF_CTR;
                else
                        return MSR_F15H_PERF_CTL;
        } else {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_K7_PERFCTR0;
                else
                        return MSR_K7_EVNTSEL0;
        }
}

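/* Map a counter or event select MSR to its counter index. */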
static enum index msr_to_index(u32 msr)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTR0:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
                return INDEX_ZERO;
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTR1:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_PERFCTR1:
                return INDEX_ONE;
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTR2:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_PERFCTR2:
                return INDEX_TWO;
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTR3:
        case MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR3:
                return INDEX_THREE;
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTR4:
                return INDEX_FOUR;
        case MSR_F15H_PERF_CTL5:
        case MSR_F15H_PERF_CTR5:
                return INDEX_FIVE;
        default:
                return INDEX_ERROR;
        }
}

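/*
 * Translate an MSR to its kvm_pmc.  Returns NULL if the MSR's class
 * doesn't match @type, or if it is a PERFCTR_CORE MSR that isn't
 * enumerated in the guest's CPUID.
 */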
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                        return NULL;
                fallthrough;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                break;
        case MSR_F15H_PERF_CTR0:
        case MSR_F15H_PERF_CTR1:
        case MSR_F15H_PERF_CTR2:
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                        return NULL;
                fallthrough;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                break;
        default:
                return NULL;
        }

        return &pmu->gp_counters[msr_to_index(msr)];
}

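/* Map a guest event_select/unit_mask pair to a generic perf event type. */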
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
                                    u8 event_select,
                                    u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
                if (amd_event_mapping[i].eventsel == event_select
                    && amd_event_mapping[i].unit_mask == unit_mask)
                        break;

        if (i == ARRAY_SIZE(amd_event_mapping))
                return PERF_COUNT_HW_MAX;

        return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
        return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
        return true;
}

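/* Translate a contiguous counter index to its kvm_pmc. */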
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                /*
                 * The idx is contiguous. The MSRs are not. The counter MSRs
                 * are interleaved with the event select MSRs.
                 */
                pmc_idx *= 2;
        }

        return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        idx &= ~(3u << 30);

        return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
        unsigned int idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *counters;

        idx &= ~(3u << 30);
        if (idx >= pmu->nr_arch_gp_counters)
                return NULL;
        counters = pmu->gp_counters;

        return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
        return false;
}

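/* The MSR names either a counter or an event select; try both banks. */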
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

        return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                msr_info->data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                msr_info->data = pmc->eventsel;
                return 0;
        }

        return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                if (data == pmc->eventsel)
                        return 0;
                if (!(data & pmu->reserved_bits)) {
                        reprogram_gp_counter(pmc, data);
                        return 0;
                }
        }

        return 1;
}

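/* Recompute the vCPU's PMU capabilities from its guest CPUID. */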
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        else
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->version = 1;
        /* not applicable to AMD, but clear them to prevent any fallout */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->global_status = 0;
        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

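/*
 * Initialize all counters up to the maximum AMD supports (PERFCTR_CORE);
 * amd_pmu_refresh() determines how many are advertised to the guest.
 */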
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
}

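/* Stop all counters and clear their configuration and counts. */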
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }
}

struct kvm_pmu_ops amd_pmu_ops = {
        .find_arch_event = amd_find_arch_event,
        .find_fixed_event = amd_find_fixed_event,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
};