perf/x86: Hybrid PMU support for intel_ctrl
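intel_ctrl is the bitmask of enabled counters for a PMU. On a hybrid system (e.g. Alder Lake), the core and atom PMUs expose different numbers of counters, so the mask may differ between them; each hybrid PMU must therefore consult its own intel_ctrl rather than the boot-time x86_pmu.intel_ctrl when checking and accessing counters.

The conversion below relies on the hybrid() accessor, which resolves a field from the hybrid PMU that owns the given struct pmu and falls back to the global x86_pmu otherwise. A minimal sketch of that pattern (the in-tree definition lives in arch/x86/events/perf_event.h and may differ in detail):

    /*
     * Resolve a PMU field: prefer the hybrid PMU's copy when the
     * system is hybrid and a pmu is given, else use the global
     * x86_pmu.  is_hybrid() and hybrid_pmu() are existing helpers.
     */
    #define hybrid(_pmu, _field)                                \
    (*({                                                        \
            typeof(&x86_pmu._field) __Fp = &x86_pmu._field;     \
                                                                \
            if (is_hybrid() && (_pmu))                          \
                    __Fp = &hybrid_pmu(_pmu)->_field;           \
                                                                \
            __Fp;                                               \
    }))

Because the macro dereferences the resulting pointer, hybrid(cpuc->pmu, intel_ctrl) reads like a plain u64 at every call site touched below.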
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index dc9e2fb..2d56055 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2153,10 +2153,11 @@ static void intel_pmu_disable_all(void)
 static void __intel_pmu_enable_all(int added, bool pmi)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
        intel_pmu_lbr_enable_all(pmi);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
-                       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
+              intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
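In __intel_pmu_enable_all(), the value written to MSR_CORE_PERF_GLOBAL_CTRL is now derived from the current PMU's counter mask instead of the global x86_pmu.intel_ctrl. The per-PMU copy lives in the hybrid PMU structure; a sketch of the assumed layout (modeled on arch/x86/events/perf_event.h, details may differ):

    struct x86_hybrid_pmu {
            struct pmu      pmu;            /* the PMU exposed to perf */
            u64             intel_ctrl;     /* enabled-counter bitmask */
            /* ... other per-PMU counter/event data ... */
    };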
@@ -2709,6 +2710,7 @@ int intel_pmu_save_and_restart(struct perf_event *event)
 static void intel_pmu_reset(void)
 {
        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        unsigned long flags;
        int idx;
 
@@ -2724,7 +2726,7 @@ static void intel_pmu_reset(void)
                wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
-               if (fixed_counter_disabled(idx))
+               if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
                wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
        }
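intel_pmu_reset() now looks up cpu_hw_events to learn which PMU the current CPU belongs to, and fixed_counter_disabled() takes that PMU so the fixed-counter check consults the right mask. A sketch of the updated helper, assuming fixed counters occupy the bits at INTEL_PMC_IDX_FIXED and above in intel_ctrl:

    static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
    {
            u64 intel_ctrl = hybrid(pmu, intel_ctrl);

            return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
    }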
@@ -2753,6 +2755,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int bit;
        int handled = 0;
+       u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
        inc_irq_stat(apic_perf_irqs);
 
@@ -2798,7 +2801,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 
                handled++;
                x86_pmu.drain_pebs(regs, &data);
-               status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+               status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
 
                /*
                 * PMI throttle may be triggered, which stops the PEBS event.
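After draining PEBS, handle_pmi_common() masks the remaining GLOBAL_STATUS bits against the counters this PMU actually owns (plus the trace/ToPA PMI bit) before iterating them. An illustration with assumed masks, for a hybrid PMU with six general-purpose and three fixed counters:

    u64 intel_ctrl = GENMASK_ULL(34, 32) | GENMASK_ULL(5, 0);
    u64 status     = BIT_ULL(35) | BIT_ULL(33) | BIT_ULL(7) | BIT_ULL(1);

    status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
    /* bits 35 and 7 are cleared: those counters do not exist on
     * this PMU, so their status bits must be ignored here. */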
@@ -3804,10 +3807,11 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+       u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
-       arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
-       arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+       arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
+       arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
                arr[0].guest &= ~cpuc->pebs_enabled;
        else