KVM: x86/pmu: Fix and isolate TSX-specific performance event logic
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 5e0ac57..efa172a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -21,7 +21,6 @@
 #define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 
 static struct kvm_event_hw_type_mapping intel_arch_events[] = {
-       /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
@@ -29,6 +28,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+       /* The above index must match CPUID 0x0A.EBX bit vector */
        [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 
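For context, a minimal standalone sketch (not kernel code; the helper name and example values are made up) of how CPUID.0AH:EBX relates to the table above: each of indices [0..6] has an EBX bit that, when set, marks that architectural event as unavailable, which is why the comment now sits above entry [7], since REF_CPU_CYCLES has no EBX bit of its own.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: EBX bit i set => arch event i NOT available. */
static uint32_t available_event_types(uint32_t cpuid_0a_ebx, unsigned mask_length)
{
	return ~cpuid_0a_ebx & ((1u << mask_length) - 1);
}

int main(void)
{
	/* Example: EBX bit 2 set, i.e. the bus-cycles event is hidden. */
	uint32_t avail = available_event_types(1u << 2, 7);
	unsigned i;

	for (i = 0; i < 7; i++)
		printf("arch event %u: %savailable\n", i,
		       (avail & (1u << i)) ? "" : "not ");
	return 0;
}
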
@@ -75,11 +75,17 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
        u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
-               if (intel_arch_events[i].eventsel == event_select &&
-                   intel_arch_events[i].unit_mask == unit_mask &&
-                   (pmc_is_fixed(pmc) || pmu->available_event_types & (1 << i)))
-                       break;
+       for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+               if (intel_arch_events[i].eventsel != event_select ||
+                   intel_arch_events[i].unit_mask != unit_mask)
+                       continue;
+
+               /* disable event that is reported as not present by cpuid */
+               if ((i < 7) && !(pmu->available_event_types & (1 << i)))
+                       return PERF_COUNT_HW_MAX + 1;
+
+               break;
+       }
 
        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;
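
A standalone sketch of the three outcomes of the reworked lookup (the table, constants and helper below are illustrative stand-ins, not the kernel's): a match that the guest CPUID advertises resolves to a generic event id, a match that CPUID masks off returns the new PERF_COUNT_HW_MAX + 1 sentinel, and no match at all still returns PERF_COUNT_HW_MAX, presumably so callers can keep treating that case as a raw event.

#include <stdint.h>
#include <stdio.h>

#define PERF_COUNT_HW_MAX	 10u	/* illustrative value */
#define ARCH_EVENTS_WITH_EBX_BIT 7	/* only indices [0..6] have a CPUID bit */

struct hw_event { uint8_t eventsel, unit_mask; unsigned id; };

static const struct hw_event events[] = {
	{ 0x3c, 0x00, 0 },	/* cpu-cycles   */
	{ 0xc0, 0x00, 1 },	/* instructions */
};

static unsigned perf_hw_id(uint8_t eventsel, uint8_t unit_mask, uint32_t available)
{
	unsigned i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		if (events[i].eventsel != eventsel ||
		    events[i].unit_mask != unit_mask)
			continue;
		/* In the table, but hidden by guest CPUID: refuse it. */
		if (i < ARCH_EVENTS_WITH_EBX_BIT && !(available & (1u << i)))
			return PERF_COUNT_HW_MAX + 1;
		return events[i].id;
	}
	return PERF_COUNT_HW_MAX;	/* not architectural: raw event */
}

int main(void)
{
	printf("%u\n", perf_hw_id(0x3c, 0x00, 0x1));	/* 0: advertised      */
	printf("%u\n", perf_hw_id(0xc0, 0x00, 0x1));	/* 11: masked off     */
	printf("%u\n", perf_hw_id(0xc5, 0x00, 0x1));	/* 10: not in table   */
	return 0;
}
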
@@ -383,6 +389,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
+       u64 reserved_bits;
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -437,7 +444,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
-                       if (!(data & pmu->reserved_bits)) {
+                       reserved_bits = pmu->reserved_bits;
+                       if ((pmc->idx == 2) &&
+                           (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
+                               reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
+                       if (!(data & reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
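
The PMC2 special case reflects the Haswell TSX filters: IN_TX is bit 32 of the event select and may be set on any GP counter, while the checkpointed IN_TXCP filter, bit 33, is only supported on general-purpose counter 2 (the host perf code likewise constrains IN_TXCP events to that counter). A standalone sketch of the resulting per-counter reserved mask; the bit values follow the arch/x86 perf definitions, and the starting masks assume a guest that was given HLE/RTM by the refresh hunk further down.

#include <stdint.h>
#include <stdio.h>

#define HSW_IN_TX		(1ULL << 32)	/* event select bit 32 */
#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)	/* event select bit 33 */

static uint64_t effective_reserved_bits(uint64_t reserved_bits,
					uint64_t raw_event_mask, int pmc_idx)
{
	/* Mirrors the hunk above: only PMC2 may set IN_TXCP. */
	if (pmc_idx == 2 && (raw_event_mask & HSW_IN_TX_CHECKPOINTED))
		reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
	return reserved_bits;
}

int main(void)
{
	/* Masks as refresh would leave them for a guest with HLE/RTM. */
	uint64_t reserved = 0xffffffff00200000ull ^ HSW_IN_TX;
	uint64_t raw_mask = HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

	printf("pmc0: %#llx\n",
	       (unsigned long long)effective_reserved_bits(reserved, raw_mask, 0));
	printf("pmc2: %#llx\n",
	       (unsigned long long)effective_reserved_bits(reserved, raw_mask, 2));
	return 0;
}
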
@@ -479,9 +490,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->raw_event_mask = X86_RAW_EVENT_MASK;
 
        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
-       if (!entry)
+       if (!entry || !vcpu->kvm->arch.enable_pmu)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;
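
The early return now also bails out when the PMU is disabled for the VM; only past that point is leaf 0AH decoded. A standalone sketch of that decode, with the field layout per the SDM's CPUID 0AH description; the struct name and example value are made up.

#include <stdint.h>
#include <stdio.h>

struct pmu_geometry {
	unsigned version;	/* EAX[7:0]   architectural PMU version  */
	unsigned nr_gp;		/* EAX[15:8]  general-purpose counters   */
	unsigned gp_width;	/* EAX[23:16] GP counter bit width       */
	unsigned mask_len;	/* EAX[31:24] length of EBX event vector */
};

static struct pmu_geometry decode_cpuid_0a_eax(uint32_t eax)
{
	return (struct pmu_geometry){
		.version  = eax & 0xff,
		.nr_gp    = (eax >> 8) & 0xff,
		.gp_width = (eax >> 16) & 0xff,
		.mask_len = (eax >> 24) & 0xff,
	};
}

int main(void)
{
	/* e.g. version 2, 4 GP counters, 48 bits wide, 7 EBX bits */
	struct pmu_geometry g = decode_cpuid_0a_eax(0x07300402);

	printf("v%u, %u GP counters, %u bits, mask length %u\n",
	       g.version, g.nr_gp, g.gp_width, g.mask_len);
	return 0;
}
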
@@ -527,15 +539,18 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-               pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+           (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+               pmu->reserved_bits ^= HSW_IN_TX;
+               pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
+       }
 
        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
-       nested_vmx_pmu_entry_exit_ctls_update(vcpu);
+       nested_vmx_pmu_refresh(vcpu,
+                              intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL));
 
        if (intel_pmu_lbr_is_compatible(vcpu))
                x86_perf_get_lbr(&lbr_desc->records);
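
At refresh time the two masks now diverge on purpose: exposing HLE/RTM clears only IN_TX from the reserved set, while IN_TXCP stays reserved and is merely recorded in raw_event_mask so the write path above can relax it for PMC2 alone (and, presumably, so the common reprogramming code lets both bits through when it builds the raw perf config). A minimal standalone sketch of the refresh-side bookkeeping; the base raw event mask below is a stand-in value, not X86_RAW_EVENT_MASK.

#include <stdint.h>
#include <stdio.h>

#define HSW_IN_TX		(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

int main(void)
{
	uint64_t reserved_bits  = 0xffffffff00200000ull;	/* default, as above  */
	uint64_t raw_event_mask = 0x000000000000ffffull;	/* stand-in base mask */
	int guest_has_tsx = 1;

	if (guest_has_tsx) {
		/* Same two operations as the refresh hunk above. */
		reserved_bits ^= HSW_IN_TX;
		raw_event_mask |= (HSW_IN_TX | HSW_IN_TX_CHECKPOINTED);
	}

	printf("IN_TX   reserved: %s\n",
	       (reserved_bits & HSW_IN_TX) ? "yes" : "no (usable on any GP counter)");
	printf("IN_TXCP reserved: %s\n",
	       (reserved_bits & HSW_IN_TX_CHECKPOINTED) ? "yes (relaxed only for PMC2)" : "no");
	printf("raw_event_mask:   %#llx\n", (unsigned long long)raw_event_mask);
	return 0;
}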