arch/x86/kvm/vmx/pmu_intel.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Map each fixed PMC index to the intel_arch_events[] entry it counts:
 * fixed counter 0 -> instructions retired, 1 -> unhalted core cycles,
 * 2 -> unhalted reference cycles.
 */
static int fixed_pmc_events[] = {1, 0, 7};

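/*
 * MSR_CORE_PERF_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter. Compare the old and new field for each counter and reprogram
 * only the counters whose control actually changed.
 */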
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                struct kvm_pmc *pmc;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                if (old_ctrl == new_ctrl)
                        continue;

                __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
                reprogram_fixed_counter(pmc, new_ctrl, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

/* Called when the IA32_PERF_GLOBAL_CTRL MSR has been written. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_counter(pmu, bit);
}

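/*
 * Translate a guest event_select/unit_mask pair into a generic perf
 * event id, but only if guest CPUID 0xA.EBX reports the architectural
 * event as available (available_event_types is derived from ~EBX).
 */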
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
                if (intel_arch_events[i].eventsel == event_select
                    && intel_arch_events[i].unit_mask == unit_mask
                    && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[i].event_type;
}

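/*
 * array_index_nospec() clamps idx under speculation, so a mispredicted
 * bounds check cannot leak out-of-bounds fixed_pmc_events[] contents.
 */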
static unsigned intel_find_fixed_event(int idx)
{
        u32 event;
        size_t size = ARRAY_SIZE(fixed_pmc_events);

        if (idx >= size)
                return PERF_COUNT_HW_MAX;

        event = fixed_pmc_events[array_index_nospec(idx, size)];
        return intel_arch_events[event].event_type;
}

/* Check whether a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

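/*
 * In the global PMC index space, GP counters occupy indices starting at
 * 0 and fixed counters start at INTEL_PMC_IDX_FIXED (32).
 */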
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}

/* Returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
                (fixed && idx >= pmu->nr_arch_fixed_counters);
}

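/*
 * For RDPMC, ECX bit 30 selects the fixed-counter space; bits 31:30 are
 * masked off before the index is range-checked, and *mask is trimmed to
 * the width of the selected counter type.
 */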
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                              unsigned int idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;
        unsigned int num_counters;

        idx &= ~(3u << 30);
        if (fixed) {
                counters = pmu->fixed_counters;
                num_counters = pmu->nr_arch_fixed_counters;
        } else {
                counters = pmu->gp_counters;
                num_counters = pmu->nr_arch_gp_counters;
        }
        if (idx >= num_counters)
                return NULL;
        *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
        return &counters[array_index_nospec(idx, num_counters)];
}

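/*
 * The global control/status MSRs exist only when the guest PMU version
 * is above 1; anything else must map to an actual counter or event
 * select MSR.
 */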
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr);
                break;
        }

        return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_fixed_pmc(pmu, msr);
        pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
        pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

        return pmc;
}

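/*
 * Counter reads are masked to the architectural counter width, so the
 * guest never sees bits beyond what its CPUID 0xA advertised.
 */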
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                msr_info->data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                msr_info->data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                msr_info->data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                msr_info->data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
                        u64 val = pmc_read_counter(pmc);
                        msr_info->data =
                                val & pmu->counter_bitmask[KVM_PMC_GP];
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        u64 val = pmc_read_counter(pmc);
                        msr_info->data =
                                val & pmu->counter_bitmask[KVM_PMC_FIXED];
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        msr_info->data = pmc->eventsel;
                        return 0;
                }
        }

        return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
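                /*
                 * Each 4-bit control field allows only the OS/USR enable
                 * bits (0x3) and the PMI bit (0x8); reject a write that
                 * sets any reserved bit, including AnyThread (0x4).
                 */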
                if (!(data & 0xfffffffffffff444ull)) {
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (kvm_valid_perf_global_ctrl(pmu, data)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & pmu->global_ovf_ctrl_mask)) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
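                        /*
                         * A guest WRMSR to a GP counter writes bits 31:0
                         * and sign-extends bit 31 into the upper bits;
                         * host-initiated writes set the full 64-bit value.
                         */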
                        if (!msr_info->host_initiated)
                                data = (s64)(s32)data;
                        pmc->counter += data - pmc_read_counter(pmc);
                        if (pmc->perf_event)
                                perf_event_period(pmc->perf_event,
                                                  get_sample_period(pmc, data));
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc->counter += data - pmc_read_counter(pmc);
                        if (pmc->perf_event)
                                perf_event_period(pmc->perf_event,
                                                  get_sample_period(pmc, data));
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }

        return 1;
}

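/*
 * Rebuild the vCPU's PMU model from guest CPUID leaf 0xA, clamping
 * counter counts and widths to what host perf actually supports.
 */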
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct x86_pmu_capability x86_pmu;
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        perf_get_x86_pmu_capability(&x86_pmu);

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         x86_pmu.num_counters_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              x86_pmu.num_counters_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;
        pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
        if (vmx_pt_mode_is_host_guest())
                pmu->global_ovf_ctrl_mask &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

        /*
         * X86_FEATURE_HLE/RTM are word/bit indices, not bit masks, so they
         * cannot be tested directly against entry->ebx; use
         * cpuid_entry_has() to check the guest's CPUID.7.0:EBX bits.
         */
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (cpuid_entry_has(entry, X86_FEATURE_HLE) ||
             cpuid_entry_has(entry, X86_FEATURE_RTM)))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

        nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

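/*
 * init() stamps every possible counter with its type, owner and global
 * index once; refresh() later decides how many are actually exposed.
 */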
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
                pmu->fixed_counters[i].current_config = 0;
        }
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc = NULL;
        int i;

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmc = &pmu->fixed_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = 0;
        }

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
};