perf/x86: Hybrid PMU support for intel_ctrl
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53b2b5f..557c674 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -228,7 +228,6 @@ struct cpu_hw_events {
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-       unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;
 
        int                     n_events; /* the # of events in the below arrays */
@@ -327,6 +326,8 @@ struct cpu_hw_events {
        int                             n_pair; /* Large increment events */
 
        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
+
+       struct pmu                      *pmu;
 };
 
 #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {        \
@@ -630,6 +631,30 @@ enum {
        x86_lbr_exclusive_max,
 };
 
+struct x86_hybrid_pmu {
+       struct pmu                      pmu;
+       union perf_capabilities         intel_cap;
+       u64                             intel_ctrl;
+};
+
+static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
+{
+       return container_of(pmu, struct x86_hybrid_pmu, pmu);
+}
+
+extern struct static_key_false perf_is_hybrid;
+#define is_hybrid()            static_branch_unlikely(&perf_is_hybrid)
+
+#define hybrid(_pmu, _field)                           \
+(*({                                                   \
+       typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = &hybrid_pmu(_pmu)->_field;       \
+                                                       \
+       __Fp;                                           \
+}))
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
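
Note: hybrid() expands to an lvalue. It takes the address of the field in the
global x86_pmu and only switches to the per-PMU copy when the perf_is_hybrid
static key is enabled and a non-NULL struct pmu is passed, so non-hybrid
systems keep reading and writing x86_pmu directly. A minimal usage sketch
(the helpers below are hypothetical, not part of the patch):

	/* Reads x86_pmu.intel_ctrl unless pmu names a hybrid PMU. */
	static u64 get_intel_ctrl(struct pmu *pmu)
	{
		return hybrid(pmu, intel_ctrl);
	}

	/* Writes the per-PMU copy on hybrid systems, the global one otherwise. */
	static void set_intel_ctrl(struct pmu *pmu, u64 val)
	{
		hybrid(pmu, intel_ctrl) = val;
	}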
@@ -816,6 +841,16 @@ struct x86_pmu {
        int (*check_period) (struct perf_event *event, u64 period);
 
        int (*aux_output_match) (struct perf_event *event);
+
+       /*
+        * Hybrid support
+        *
+        * Most PMU capabilities are the same among different hybrid PMUs.
+        * The global x86_pmu saves the architecture capabilities, which
+        * are available for all PMUs. The hybrid_pmu only includes the
+        * unique capabilities.
+        */
+       struct x86_hybrid_pmu           *hybrid_pmu;
 };
 
 struct x86_perf_task_context_opt {
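
The split described in the comment above keeps architectural state in the
global x86_pmu and leaves only the per-CPU-type differences in struct
x86_hybrid_pmu. As an illustration only (hypothetical helper, not the actual
Alder Lake init code), a hybrid driver could derive each PMU's GLOBAL_CTRL
enable mask from its own counter counts:

	/*
	 * Hypothetical sketch: GP counters occupy bits 0..n-1 of
	 * IA32_PERF_GLOBAL_CTRL, fixed counters start at bit
	 * INTEL_PMC_IDX_FIXED.
	 */
	static void init_hybrid_intel_ctrl(struct x86_hybrid_pmu *h,
					   int num_counters,
					   int num_counters_fixed)
	{
		h->intel_ctrl = (1ULL << num_counters) - 1;
		h->intel_ctrl |= ((1ULL << num_counters_fixed) - 1)
				 << INTEL_PMC_IDX_FIXED;
	}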
@@ -905,7 +940,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = {            \
        .event_str_ht   = ht,                                           \
 }
 
-struct pmu *x86_get_pmu(void);
+struct pmu *x86_get_pmu(unsigned int cpu);
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
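
x86_get_pmu() now takes a CPU number, since on a hybrid system there is no
single struct pmu that covers every CPU. Callers pass the CPU they are
operating on; a minimal caller sketch (hypothetical helper):

	/* Resolve the struct pmu that owns the current CPU. */
	static struct pmu *this_cpu_x86_pmu(void)
	{
		return x86_get_pmu(smp_processor_id());
	}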
@@ -964,6 +999,9 @@ static inline int x86_pmu_rdpmc_index(int index)
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
+bool check_hw_exists(struct pmu *pmu, int num_counters,
+                    int num_counters_fixed);
+
 int x86_add_exclusive(unsigned int what);
 
 void x86_del_exclusive(unsigned int what);
@@ -1068,9 +1106,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
 
-static inline bool fixed_counter_disabled(int i)
+static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
 {
-       return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+       u64 intel_ctrl = hybrid(pmu, intel_ctrl);
+
+       return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
 }
 
 #ifdef CONFIG_CPU_SUP_AMD
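
fixed_counter_disabled() now needs to know which PMU is being queried, because
each hybrid PMU can advertise a different set of fixed counters through its own
intel_ctrl. A sketch of the calling pattern (the helper and printout below are
illustrative only, not part of the patch):

	/* Hypothetical debug helper: skip fixed counters the PMU does not implement. */
	static void dump_fixed_counters(struct pmu *pmu, int num_counters_fixed)
	{
		u64 val;
		int i;

		for (i = 0; i < num_counters_fixed; i++) {
			if (fixed_counter_disabled(i, pmu))
				continue;
			rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + i, val);
			pr_info("fixed counter %d: %016llx\n", i, val);
		}
	}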