// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

#define MSR_PMC_FULL_WIDTH_BIT	(MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* Mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

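/*
 * IA32_FIXED_CTR_CTRL carries a 4-bit control field per fixed counter
 * (enable in ring 0, enable in ring 3, AnyThread, PMI on overflow).
 * Only counters whose control field actually changed are reprogrammed.
 */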
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

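/*
 * Map the guest's event_select/unit_mask pair onto a generic perf hardware
 * event id, honoring the architectural events advertised in CPUID 0xA.EBX.
 */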
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select &&
		    intel_arch_events[i].unit_mask == unit_mask &&
		    (pmc_is_fixed(pmc) || pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

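/* Translate a global counter index (as used in global_ctrl) to its kvm_pmc. */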
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

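/* For RDPMC, ECX bit 30 selects the fixed-counter space; the low bits index it. */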
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return fixed ? idx < pmu->nr_arch_fixed_counters
		     : idx < pmu->nr_arch_gp_counters;
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

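/* IA32_PERF_CAPABILITIES is only exposed when the guest CPUID enumerates PDCM. */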
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
{
	/*
	 * As a first step, a guest can only enable the LBR feature if its
	 * CPU model is the same as the host's, because the LBR registers
	 * are passed through to the guest and are model specific.
	 */
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);

	return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
}

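/*
 * A valid LBR MSR is either MSR_LBR_SELECT, MSR_LBR_TOS, or a slot in one of
 * the FROM/TO/INFO record arrays described by the vCPU's x86_pmu_lbr layout.
 */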
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
		(index >= records->from && index < records->from + records->nr) ||
		(index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

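/* The global control/status MSRs require architectural PMU version 2 or later. */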
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

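/* Resolve a counter MSR (fixed counter, legacy eventsel, or GP counter) to its kvm_pmc. */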
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

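/*
 * Create a pinned host perf event that owns the LBR facility on behalf of
 * this vCPU; while it is scheduled in, the guest's LBR records are preserved
 * across host context switches.
 */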
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed with the minimum required settings:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record the guest's branch behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
	 *   should schedule the event with a fake HW counter instead of a real one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM to save/restore guest LBR records
	 *   during host context switches and reduces a lot of overhead;
	 *   check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
					__func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It's safe for the guest to access the LBR MSRs when they have not been
 * passed through, since the host will restore or reset the LBR MSR records
 * when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
				     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable irqs to ensure the LBR feature doesn't get reclaimed by the
	 * host at the time the value is read from the MSR; this avoids the
	 * host LBR value being leaked to the guest. If LBR has been reclaimed,
	 * return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
			return 0;
	}

	return 1;
}

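/*
 * Guest writes through the legacy MSR_IA32_PERFCTRx aliases are sign-extended
 * from 32 bits; only the MSR_IA32_PMCx aliases (and host-initiated writes) may
 * set the full counter width.
 */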
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event && !pmc->is_paused)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event && !pmc->is_paused)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
			return 0;
	}

	return 1;
}

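/*
 * Give each fixed counter the eventsel/unit_mask encoding of the
 * architectural event it counts (see fixed_pmc_events).
 */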
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	size_t size = ARRAY_SIZE(fixed_pmc_events);
	struct kvm_pmc *pmc;
	u32 event;
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		pmc = &pmu->fixed_counters[i];
		event = fixed_pmc_events[array_index_nospec(i, size)];
		pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
			intel_arch_events[event].eventsel;
	}
}

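/*
 * (Re)build the vPMU from the guest's CPUID leaf 0xA, clamping counter
 * counts and widths to what the host PMU actually supports.
 */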
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min3(ARRAY_SIZE(fixed_pmc_events),
			     (size_t) edx.split.num_counters_fixed,
			     (size_t) x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
		setup_fixed_pmc_eventsel(pmu);
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);

	if (intel_pmu_lbr_is_compatible(vcpu))
		x86_perf_get_lbr(&lbr_desc->records);
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and
 * KVM emulates this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume recording branches.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

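/* Toggle MSR interception for all LBR-related MSRs: the FROM/TO/INFO arrays, SELECT and TOS. */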
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. cpu pinned) could reclaim the
 * pmu resources (e.g. LBR) that were assigned to the guest. This is
 * usually done via ipi calls (more details in perf_install_in_context).
 *
 * Before entering non-root mode (with irqs disabled here), double
 * check that the pmu features enabled for the guest have not been reclaimed
 * by higher priority host events. Otherwise, disallow the vCPU's access to
 * the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
		vcpu->vcpu_id);
}

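/*
 * Once the guest has cleared DEBUGCTLMSR_LBR, the vLBR event is no longer
 * needed and can be released back to host perf.
 */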
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

struct kvm_pmu_ops intel_pmu_ops = {
	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
};