Merge tag 's390-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[linux-2.6-microblaze.git] / arch / x86 / events / perf_event.h
index 1e98a42..07fc84b 100644 (file)
@@ -49,28 +49,33 @@ struct event_constraint {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
-       u64     code;
-       u64     cmask;
-       int     weight;
-       int     overlap;
-       int     flags;
+       u64             code;
+       u64             cmask;
+       int             weight;
+       int             overlap;
+       int             flags;
+       unsigned int    size;
 };
+
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
+       return ((ecode & c->cmask) - c->code) <= (u64)c->size;
+}
+
 /*
  * struct hw_perf_event.flags flags
  */
 #define PERF_X86_EVENT_PEBS_LDLAT      0x0001 /* ld+ldlat data address sampling */
 #define PERF_X86_EVENT_PEBS_ST         0x0002 /* st data address sampling */
 #define PERF_X86_EVENT_PEBS_ST_HSW     0x0004 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED       0x0008 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW     0x0010 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW     0x0020 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL            0x0040 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC         0x0080 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED   0x0100 /* grant rdpmc permission */
-#define PERF_X86_EVENT_EXCL_ACCT       0x0200 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD     0x0400 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS      0x0800 /* use large PEBS */
-
+#define PERF_X86_EVENT_PEBS_LD_HSW     0x0008 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW     0x0010 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL            0x0020 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC         0x0040 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED   0x0080 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT       0x0100 /* accounted EXCL event */
+#define PERF_X86_EVENT_AUTO_RELOAD     0x0200 /* use PEBS auto-reload */
+#define PERF_X86_EVENT_LARGE_PEBS      0x0400 /* use large PEBS */
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */
@@ -116,6 +121,24 @@ struct amd_nb {
         (1ULL << PERF_REG_X86_R14)   | \
         (1ULL << PERF_REG_X86_R15))
 
+#define PEBS_XMM_REGS                   \
+       ((1ULL << PERF_REG_X86_XMM0)  | \
+        (1ULL << PERF_REG_X86_XMM1)  | \
+        (1ULL << PERF_REG_X86_XMM2)  | \
+        (1ULL << PERF_REG_X86_XMM3)  | \
+        (1ULL << PERF_REG_X86_XMM4)  | \
+        (1ULL << PERF_REG_X86_XMM5)  | \
+        (1ULL << PERF_REG_X86_XMM6)  | \
+        (1ULL << PERF_REG_X86_XMM7)  | \
+        (1ULL << PERF_REG_X86_XMM8)  | \
+        (1ULL << PERF_REG_X86_XMM9)  | \
+        (1ULL << PERF_REG_X86_XMM10) | \
+        (1ULL << PERF_REG_X86_XMM11) | \
+        (1ULL << PERF_REG_X86_XMM12) | \
+        (1ULL << PERF_REG_X86_XMM13) | \
+        (1ULL << PERF_REG_X86_XMM14) | \
+        (1ULL << PERF_REG_X86_XMM15))
+
 /*
  * Per register state.
  */
@@ -207,10 +230,16 @@ struct cpu_hw_events {
        int                     n_pebs;
        int                     n_large_pebs;
 
+       /* Current superset of the events' hardware configuration */
+       u64                     pebs_data_cfg;
+       u64                     active_pebs_data_cfg;
+       int                     pebs_record_size;
+
        /*
         * Intel LBR bits
         */
        int                             lbr_users;
+       int                             lbr_pebs_users;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
@@ -257,18 +286,29 @@ struct cpu_hw_events {
        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {        \
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
+       .size = (e) - (c),              \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
 }
 
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
+       __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
+
 #define EVENT_CONSTRAINT(c, n, m)      \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
 
+/*
+ * The constraint_match() function only works for 'simple' event codes
+ * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
+ */
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
+       __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
+
 #define INTEL_EXCLEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)
@@ -303,6 +343,12 @@ struct cpu_hw_events {
 #define INTEL_EVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
+/*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)                  \
+       EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
 /*
  * Constraint on the Event code + UMask + fixed-mask
  *
@@ -350,6 +396,9 @@ struct cpu_hw_events {
 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
 
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)                    \
+       EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
 /* Check only flags, but allow all event/umask */
 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)    \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
@@ -366,6 +415,11 @@ struct cpu_hw_events {
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
 
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+       __EVENT_CONSTRAINT_RANGE(code, end, n,                          \
+                         ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
@@ -473,6 +527,7 @@ union perf_capabilities {
                 * values > 32bit.
                 */
                u64     full_width_write:1;
+               u64     pebs_baseline:1;
        };
        u64     capabilities;
 };
@@ -613,14 +668,16 @@ struct x86_pmu {
                        pebs_broken             :1,
                        pebs_prec_dist          :1,
                        pebs_no_tlb             :1,
-                       pebs_no_isolation       :1;
+                       pebs_no_isolation       :1,
+                       pebs_no_xmm_regs        :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
+       int             max_pebs_events;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
-       int             max_pebs_events;
        unsigned long   large_pebs_flags;
+       u64             rtm_abort_event;
 
        /*
         * Intel LBR
@@ -714,6 +771,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = {            \
        .event_str_ht   = ht,                                           \
 }
 
+struct pmu *x86_get_pmu(void);
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static inline bool x86_pmu_has_lbr_callstack(void)
@@ -941,6 +999,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[];
 
 extern struct event_constraint intel_skl_pebs_event_constraints[];
 
+extern struct event_constraint intel_icl_pebs_event_constraints[];
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
 
 void intel_pmu_pebs_add(struct perf_event *event);
@@ -959,6 +1019,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
 
 void intel_pmu_auto_reload_read(struct perf_event *event);
 
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+
 void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);