perf/x86: Hybrid PMU support for intel_ctrl
[linux-2.6-microblaze.git] / arch / x86 / events / perf_event.h
index da947d3..557c674 100644 (file)
@@ -631,6 +631,30 @@ enum {
        x86_lbr_exclusive_max,
 };
 
+/*
+ * Per-PMU state for hybrid systems.  Only the capabilities that differ
+ * between the hybrid PMUs live here; everything common stays in the
+ * global x86_pmu (see the comment on x86_pmu.hybrid_pmu below).
+ */
+struct x86_hybrid_pmu {
+       struct pmu                      pmu;            /* embedded generic pmu; container_of() anchor */
+       union perf_capabilities         intel_cap;      /* per-PMU Intel capability bits */
+       u64                             intel_ctrl;     /* per-PMU counter-enable mask (GLOBAL_CTRL layout) */
+};
+
+/* Map a generic struct pmu back to its enclosing x86_hybrid_pmu. */
+static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
+{
+       return container_of(pmu, struct x86_hybrid_pmu, pmu);
+}
+
+/* Static key: enabled only when running on a hybrid system. */
+extern struct static_key_false perf_is_hybrid;
+#define is_hybrid()            static_branch_unlikely(&perf_is_hybrid)
+
+/*
+ * Access a PMU field as an lvalue.  On a hybrid system with a non-NULL
+ * _pmu this resolves to the per-PMU copy inside x86_hybrid_pmu;
+ * otherwise it falls back to the global x86_pmu field.  The
+ * typeof(&x86_pmu._field) pointer keeps both branches type-compatible,
+ * and the outer (*({...})) dereference makes the result assignable.
+ */
+#define hybrid(_pmu, _field)                           \
+(*({                                                   \
+       typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = &hybrid_pmu(_pmu)->_field;       \
+                                                       \
+       __Fp;                                           \
+}))
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
@@ -817,6 +841,16 @@ struct x86_pmu {
        int (*check_period) (struct perf_event *event, u64 period);
 
        int (*aux_output_match) (struct perf_event *event);
+
+       /*
+        * Hybrid support
+        *
+        * Most PMU capabilities are the same among different hybrid PMUs.
+        * The global x86_pmu saves the architecture capabilities, which
+        * are available for all PMUs. The hybrid_pmu only includes the
+        * unique capabilities.
+        */
+       struct x86_hybrid_pmu           *hybrid_pmu;
 };
 
 struct x86_perf_task_context_opt {
@@ -965,6 +999,9 @@ static inline int x86_pmu_rdpmc_index(int index)
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
+/*
+ * Sanity-check that the advertised general/fixed counters are actually
+ * usable for the given pmu (definition lives in the events core; exact
+ * probing method not visible here).
+ */
+bool check_hw_exists(struct pmu *pmu, int num_counters,
+                    int num_counters_fixed);
+
 int x86_add_exclusive(unsigned int what);
 
 void x86_del_exclusive(unsigned int what);
@@ -1069,9 +1106,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
 
-static inline bool fixed_counter_disabled(int i)
+/*
+ * A fixed counter i is considered disabled when no enable bit at or
+ * above its position (i + INTEL_PMC_IDX_FIXED) is set in intel_ctrl.
+ * The mask is now looked up via hybrid(), so on hybrid systems the
+ * per-PMU intel_ctrl is consulted instead of the global one.
+ */
+static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
 {
-       return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+       u64 intel_ctrl = hybrid(pmu, intel_ctrl);
+
+       return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
 }
 
 #ifdef CONFIG_CPU_SUP_AMD