4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/nmi.h>
17 #include <asm/cpufeature.h>
18 #include <asm/hardirq.h>
19 #include <asm/intel-family.h>
22 #include "../perf_event.h"
25 * Intel PerfMon, used on Core and later.
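 * Each entry packs the architectural encoding: bits 7:0 are the event
 * select and bits 15:8 the unit mask, e.g. 0x412e = event 0x2e,
 * umask 0x41 (LONGEST_LAT_CACHE.MISS).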
27 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
29 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
30 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
31 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
32 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
33 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
34 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
35 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
36 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
39 static struct event_constraint intel_core_event_constraints[] __read_mostly =
41 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
42 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
43 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
44 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
45 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
46 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
50 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
52 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
53 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
54 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
55 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
56 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
57 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
58 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
59 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
60 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
61 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
62 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
63 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
64 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
68 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
70 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
71 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
72 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
73 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
74 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
75 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
76 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
77 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
78 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
79 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
80 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
84 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
86 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
87 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
88 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
92 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
94 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
95 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
96 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
97 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
98 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
99 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
100 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
104 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
106 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
107 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
108 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
109 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
110 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
111 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
112 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
113 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
114 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
115 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
116 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
117 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
120 * When HT is off these events can only run on the bottom 4 counters
121 * When HT is on, they are impacted by the HT bug and require EXCL access
123 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
124 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
125 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
126 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
131 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
133 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
134 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
135 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
136 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
137	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf),	/* IDQ.EMPTY */
138 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
139 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
140 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
141 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
142 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
144 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
148 * When HT is off these events can only run on the bottom 4 counters
149 * When HT is on, they are impacted by the HT bug and require EXCL access
151 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
152 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
153 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
154 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
159 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
161 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
162 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
163 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
164 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
168 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
173 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
175 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
176 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
177 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
181 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
183 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
184 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
185 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
189 static struct event_constraint intel_skl_event_constraints[] = {
190 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
191 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
192 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
193 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
196 * when HT is off, these can only run on the bottom 4 counters
198 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
199 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
200 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
201 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
207 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
208 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
209 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
213 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
214 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
215 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
216 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
217 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
221 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
222 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
223 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
224 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
225 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
229 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
230 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
231 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
232 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
234	 * Note: the low 8 bits of the eventsel code are not a contiguous field;
235	 * they contain some bits that would #GP if set. These are masked out.
237 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
241 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
242 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
243 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
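/*
 * In the mem-loads events above, ldlat=3 sets the PEBS load-latency
 * threshold: only loads taking at least 3 cycles are sampled.
 */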
245 static struct attribute *nhm_mem_events_attrs[] = {
246 EVENT_PTR(mem_ld_nhm),
251 * topdown events for Intel Core CPUs.
253	 * The events are all in units of slots, where a slot is one issue
254	 * opportunity in the 4-wide pipeline. Some events are already reported
255	 * in slots; for cycle events we multiply by the pipeline width (4).
257 * With Hyper Threading on, topdown metrics are either summed or averaged
258 * between the threads of a core: (count_t0 + count_t1).
260 * For the average case the metric is always scaled to pipeline width,
261 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
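 * For example, with count_t0 = 100 and count_t1 = 60, the scaled metric
 * is (100 + 60) / 2 * 4 = (100 + 60) * 2 = 320 slots.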
264 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
265 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
266 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
267 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
268 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
269 "event=0xe,umask=0x1"); /* uops_issued.any */
270 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
271 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
272 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
273 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
274 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
275 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
276 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
277 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
280 static struct attribute *snb_events_attrs[] = {
281 EVENT_PTR(td_slots_issued),
282 EVENT_PTR(td_slots_retired),
283 EVENT_PTR(td_fetch_bubbles),
284 EVENT_PTR(td_total_slots),
285 EVENT_PTR(td_total_slots_scale),
286 EVENT_PTR(td_recovery_bubbles),
287 EVENT_PTR(td_recovery_bubbles_scale),
291 static struct attribute *snb_mem_events_attrs[] = {
292 EVENT_PTR(mem_ld_snb),
293 EVENT_PTR(mem_st_snb),
297 static struct event_constraint intel_hsw_event_constraints[] = {
298 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
299 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
300 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
301 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
302 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
303 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
304 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
305 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
306 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
307 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
308 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
309 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
312 * When HT is off these events can only run on the bottom 4 counters
313 * When HT is on, they are impacted by the HT bug and require EXCL access
315 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
316 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
317 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
318 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
323 static struct event_constraint intel_bdw_event_constraints[] = {
324 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
325 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
326 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
327 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
328 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
330 * when HT is off, these can only run on the bottom 4 counters
332 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
333 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
334 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
335 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
339 static u64 intel_pmu_event_map(int hw_event)
341 return intel_perfmon_event_map[hw_event];
345 * Notes on the events:
346 * - data reads do not include code reads (comparable to earlier tables)
347 * - data counts include speculative execution (except L1 write, dtlb, bpu)
348 * - remote node access includes remote memory, remote cache, remote mmio.
349 * - prefetches are not included in the counts.
350 * - icache miss does not include decoded icache
353 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
354 #define SKL_DEMAND_RFO BIT_ULL(1)
355 #define SKL_ANY_RESPONSE BIT_ULL(16)
356 #define SKL_SUPPLIER_NONE BIT_ULL(17)
357 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
358 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
359 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
360 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
361 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
362 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
363 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
364 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
365 #define SKL_SPL_HIT BIT_ULL(30)
366 #define SKL_SNOOP_NONE BIT_ULL(31)
367 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
368 #define SKL_SNOOP_MISS BIT_ULL(33)
369 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
370 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
371 #define SKL_SNOOP_HITM BIT_ULL(36)
372 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
373 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
374 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
375 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
376 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
377 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
378 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
379 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
380 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
381 SKL_SNOOP_HITM|SKL_SPL_HIT)
382 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
383 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
384 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
385 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
386 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
388 static __initconst const u64 skl_hw_cache_event_ids
389 [PERF_COUNT_HW_CACHE_MAX]
390 [PERF_COUNT_HW_CACHE_OP_MAX]
391 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
395 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
396 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
399 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
400 [ C(RESULT_MISS) ] = 0x0,
402 [ C(OP_PREFETCH) ] = {
403 [ C(RESULT_ACCESS) ] = 0x0,
404 [ C(RESULT_MISS) ] = 0x0,
409 [ C(RESULT_ACCESS) ] = 0x0,
410 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
413 [ C(RESULT_ACCESS) ] = -1,
414 [ C(RESULT_MISS) ] = -1,
416 [ C(OP_PREFETCH) ] = {
417 [ C(RESULT_ACCESS) ] = 0x0,
418 [ C(RESULT_MISS) ] = 0x0,
423 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
424 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
427 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
428 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
430 [ C(OP_PREFETCH) ] = {
431 [ C(RESULT_ACCESS) ] = 0x0,
432 [ C(RESULT_MISS) ] = 0x0,
437 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
438 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
441 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
442 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
444 [ C(OP_PREFETCH) ] = {
445 [ C(RESULT_ACCESS) ] = 0x0,
446 [ C(RESULT_MISS) ] = 0x0,
451 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
452 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
455 [ C(RESULT_ACCESS) ] = -1,
456 [ C(RESULT_MISS) ] = -1,
458 [ C(OP_PREFETCH) ] = {
459 [ C(RESULT_ACCESS) ] = -1,
460 [ C(RESULT_MISS) ] = -1,
465 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
466 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
469 [ C(RESULT_ACCESS) ] = -1,
470 [ C(RESULT_MISS) ] = -1,
472 [ C(OP_PREFETCH) ] = {
473 [ C(RESULT_ACCESS) ] = -1,
474 [ C(RESULT_MISS) ] = -1,
479 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
480 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
483 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
484 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
486 [ C(OP_PREFETCH) ] = {
487 [ C(RESULT_ACCESS) ] = 0x0,
488 [ C(RESULT_MISS) ] = 0x0,
493 static __initconst const u64 skl_hw_cache_extra_regs
494 [PERF_COUNT_HW_CACHE_MAX]
495 [PERF_COUNT_HW_CACHE_OP_MAX]
496 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
500 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
501 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
502 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
503 SKL_L3_MISS|SKL_ANY_SNOOP|
507 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
508 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
509 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
510 SKL_L3_MISS|SKL_ANY_SNOOP|
513 [ C(OP_PREFETCH) ] = {
514 [ C(RESULT_ACCESS) ] = 0x0,
515 [ C(RESULT_MISS) ] = 0x0,
520 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
521 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
522 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
523 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
526 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
527 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
528 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
529 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
531 [ C(OP_PREFETCH) ] = {
532 [ C(RESULT_ACCESS) ] = 0x0,
533 [ C(RESULT_MISS) ] = 0x0,
538 #define SNB_DMND_DATA_RD (1ULL << 0)
539 #define SNB_DMND_RFO (1ULL << 1)
540 #define SNB_DMND_IFETCH (1ULL << 2)
541 #define SNB_DMND_WB (1ULL << 3)
542 #define SNB_PF_DATA_RD (1ULL << 4)
543 #define SNB_PF_RFO (1ULL << 5)
544 #define SNB_PF_IFETCH (1ULL << 6)
545 #define SNB_LLC_DATA_RD (1ULL << 7)
546 #define SNB_LLC_RFO (1ULL << 8)
547 #define SNB_LLC_IFETCH (1ULL << 9)
548 #define SNB_BUS_LOCKS (1ULL << 10)
549 #define SNB_STRM_ST (1ULL << 11)
550 #define SNB_OTHER (1ULL << 15)
551 #define SNB_RESP_ANY (1ULL << 16)
552 #define SNB_NO_SUPP (1ULL << 17)
553 #define SNB_LLC_HITM (1ULL << 18)
554 #define SNB_LLC_HITE (1ULL << 19)
555 #define SNB_LLC_HITS (1ULL << 20)
556 #define SNB_LLC_HITF (1ULL << 21)
557 #define SNB_LOCAL (1ULL << 22)
558 #define SNB_REMOTE (0xffULL << 23)
559 #define SNB_SNP_NONE (1ULL << 31)
560 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
561 #define SNB_SNP_MISS (1ULL << 33)
562 #define SNB_NO_FWD (1ULL << 34)
563 #define SNB_SNP_FWD (1ULL << 35)
564 #define SNB_HITM (1ULL << 36)
565 #define SNB_NON_DRAM (1ULL << 37)
567 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
568 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
569 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
571 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
572 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
575 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
576 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
578 #define SNB_L3_ACCESS SNB_RESP_ANY
579 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
581 static __initconst const u64 snb_hw_cache_extra_regs
582 [PERF_COUNT_HW_CACHE_MAX]
583 [PERF_COUNT_HW_CACHE_OP_MAX]
584 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
588 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
589 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
592 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
593 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
595 [ C(OP_PREFETCH) ] = {
596 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
597 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
602 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
603 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
606 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
607 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
609 [ C(OP_PREFETCH) ] = {
610 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
611 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
616 static __initconst const u64 snb_hw_cache_event_ids
617 [PERF_COUNT_HW_CACHE_MAX]
618 [PERF_COUNT_HW_CACHE_OP_MAX]
619 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
623 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
624 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
627 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
628 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
630 [ C(OP_PREFETCH) ] = {
631 [ C(RESULT_ACCESS) ] = 0x0,
632 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
637 [ C(RESULT_ACCESS) ] = 0x0,
638 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
641 [ C(RESULT_ACCESS) ] = -1,
642 [ C(RESULT_MISS) ] = -1,
644 [ C(OP_PREFETCH) ] = {
645 [ C(RESULT_ACCESS) ] = 0x0,
646 [ C(RESULT_MISS) ] = 0x0,
651 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
652 [ C(RESULT_ACCESS) ] = 0x01b7,
653 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
654 [ C(RESULT_MISS) ] = 0x01b7,
657 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
658 [ C(RESULT_ACCESS) ] = 0x01b7,
659 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
660 [ C(RESULT_MISS) ] = 0x01b7,
662 [ C(OP_PREFETCH) ] = {
663 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
664 [ C(RESULT_ACCESS) ] = 0x01b7,
665 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
666 [ C(RESULT_MISS) ] = 0x01b7,
671 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
672 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
675 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
676 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
678 [ C(OP_PREFETCH) ] = {
679 [ C(RESULT_ACCESS) ] = 0x0,
680 [ C(RESULT_MISS) ] = 0x0,
685 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
686 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
689 [ C(RESULT_ACCESS) ] = -1,
690 [ C(RESULT_MISS) ] = -1,
692 [ C(OP_PREFETCH) ] = {
693 [ C(RESULT_ACCESS) ] = -1,
694 [ C(RESULT_MISS) ] = -1,
699 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
700 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
703 [ C(RESULT_ACCESS) ] = -1,
704 [ C(RESULT_MISS) ] = -1,
706 [ C(OP_PREFETCH) ] = {
707 [ C(RESULT_ACCESS) ] = -1,
708 [ C(RESULT_MISS) ] = -1,
713 [ C(RESULT_ACCESS) ] = 0x01b7,
714 [ C(RESULT_MISS) ] = 0x01b7,
717 [ C(RESULT_ACCESS) ] = 0x01b7,
718 [ C(RESULT_MISS) ] = 0x01b7,
720 [ C(OP_PREFETCH) ] = {
721 [ C(RESULT_ACCESS) ] = 0x01b7,
722 [ C(RESULT_MISS) ] = 0x01b7,
729 * Notes on the events:
730 * - data reads do not include code reads (comparable to earlier tables)
731 * - data counts include speculative execution (except L1 write, dtlb, bpu)
732 * - remote node access includes remote memory, remote cache, remote mmio.
733 * - prefetches are not included in the counts because they are not reliable.
737 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
738 #define HSW_DEMAND_RFO BIT_ULL(1)
739 #define HSW_ANY_RESPONSE BIT_ULL(16)
740 #define HSW_SUPPLIER_NONE BIT_ULL(17)
741 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
742 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
743 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
744 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
745 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
746 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
747 HSW_L3_MISS_REMOTE_HOP2P)
748 #define HSW_SNOOP_NONE BIT_ULL(31)
749 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
750 #define HSW_SNOOP_MISS BIT_ULL(33)
751 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
752 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
753 #define HSW_SNOOP_HITM BIT_ULL(36)
754 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
755 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
756 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
757 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
758 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
759 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
760 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
761 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
762 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
763 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
764 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
766 #define BDW_L3_MISS_LOCAL BIT(26)
767 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
768 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
769 HSW_L3_MISS_REMOTE_HOP2P)
772 static __initconst const u64 hsw_hw_cache_event_ids
773 [PERF_COUNT_HW_CACHE_MAX]
774 [PERF_COUNT_HW_CACHE_OP_MAX]
775 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
779 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
780 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
783 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
784 [ C(RESULT_MISS) ] = 0x0,
786 [ C(OP_PREFETCH) ] = {
787 [ C(RESULT_ACCESS) ] = 0x0,
788 [ C(RESULT_MISS) ] = 0x0,
793 [ C(RESULT_ACCESS) ] = 0x0,
794 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
797 [ C(RESULT_ACCESS) ] = -1,
798 [ C(RESULT_MISS) ] = -1,
800 [ C(OP_PREFETCH) ] = {
801 [ C(RESULT_ACCESS) ] = 0x0,
802 [ C(RESULT_MISS) ] = 0x0,
807 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
808 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
811 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
812 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
814 [ C(OP_PREFETCH) ] = {
815 [ C(RESULT_ACCESS) ] = 0x0,
816 [ C(RESULT_MISS) ] = 0x0,
821 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
822 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
825 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
826 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
828 [ C(OP_PREFETCH) ] = {
829 [ C(RESULT_ACCESS) ] = 0x0,
830 [ C(RESULT_MISS) ] = 0x0,
835 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
836 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
839 [ C(RESULT_ACCESS) ] = -1,
840 [ C(RESULT_MISS) ] = -1,
842 [ C(OP_PREFETCH) ] = {
843 [ C(RESULT_ACCESS) ] = -1,
844 [ C(RESULT_MISS) ] = -1,
849 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
850 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
853 [ C(RESULT_ACCESS) ] = -1,
854 [ C(RESULT_MISS) ] = -1,
856 [ C(OP_PREFETCH) ] = {
857 [ C(RESULT_ACCESS) ] = -1,
858 [ C(RESULT_MISS) ] = -1,
863 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
864 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
867 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
868 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
870 [ C(OP_PREFETCH) ] = {
871 [ C(RESULT_ACCESS) ] = 0x0,
872 [ C(RESULT_MISS) ] = 0x0,
877 static __initconst const u64 hsw_hw_cache_extra_regs
878 [PERF_COUNT_HW_CACHE_MAX]
879 [PERF_COUNT_HW_CACHE_OP_MAX]
880 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
884 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
886 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
887 HSW_L3_MISS|HSW_ANY_SNOOP,
890 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
892 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
893 HSW_L3_MISS|HSW_ANY_SNOOP,
895 [ C(OP_PREFETCH) ] = {
896 [ C(RESULT_ACCESS) ] = 0x0,
897 [ C(RESULT_MISS) ] = 0x0,
902 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
903 HSW_L3_MISS_LOCAL_DRAM|
905 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
910 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
911 HSW_L3_MISS_LOCAL_DRAM|
913 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
917 [ C(OP_PREFETCH) ] = {
918 [ C(RESULT_ACCESS) ] = 0x0,
919 [ C(RESULT_MISS) ] = 0x0,
924 static __initconst const u64 westmere_hw_cache_event_ids
925 [PERF_COUNT_HW_CACHE_MAX]
926 [PERF_COUNT_HW_CACHE_OP_MAX]
927 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
931 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
932 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
935	[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
936 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
938 [ C(OP_PREFETCH) ] = {
939 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
940 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
945 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
946 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
949 [ C(RESULT_ACCESS) ] = -1,
950 [ C(RESULT_MISS) ] = -1,
952 [ C(OP_PREFETCH) ] = {
953 [ C(RESULT_ACCESS) ] = 0x0,
954 [ C(RESULT_MISS) ] = 0x0,
959 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
960 [ C(RESULT_ACCESS) ] = 0x01b7,
961 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
962 [ C(RESULT_MISS) ] = 0x01b7,
965	 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
969 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
970 [ C(RESULT_ACCESS) ] = 0x01b7,
971 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
972 [ C(RESULT_MISS) ] = 0x01b7,
974 [ C(OP_PREFETCH) ] = {
975 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
976 [ C(RESULT_ACCESS) ] = 0x01b7,
977 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
978 [ C(RESULT_MISS) ] = 0x01b7,
983 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
984 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
987	[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
988 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
990 [ C(OP_PREFETCH) ] = {
991 [ C(RESULT_ACCESS) ] = 0x0,
992 [ C(RESULT_MISS) ] = 0x0,
997 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
998 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1001 [ C(RESULT_ACCESS) ] = -1,
1002 [ C(RESULT_MISS) ] = -1,
1004 [ C(OP_PREFETCH) ] = {
1005 [ C(RESULT_ACCESS) ] = -1,
1006 [ C(RESULT_MISS) ] = -1,
1011 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1012 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1015 [ C(RESULT_ACCESS) ] = -1,
1016 [ C(RESULT_MISS) ] = -1,
1018 [ C(OP_PREFETCH) ] = {
1019 [ C(RESULT_ACCESS) ] = -1,
1020 [ C(RESULT_MISS) ] = -1,
1025 [ C(RESULT_ACCESS) ] = 0x01b7,
1026 [ C(RESULT_MISS) ] = 0x01b7,
1029 [ C(RESULT_ACCESS) ] = 0x01b7,
1030 [ C(RESULT_MISS) ] = 0x01b7,
1032 [ C(OP_PREFETCH) ] = {
1033 [ C(RESULT_ACCESS) ] = 0x01b7,
1034 [ C(RESULT_MISS) ] = 0x01b7,
1040 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1041 * See IA32 SDM Vol 3B 30.6.1.3
1044 #define NHM_DMND_DATA_RD (1 << 0)
1045 #define NHM_DMND_RFO (1 << 1)
1046 #define NHM_DMND_IFETCH (1 << 2)
1047 #define NHM_DMND_WB (1 << 3)
1048 #define NHM_PF_DATA_RD (1 << 4)
1049 #define NHM_PF_DATA_RFO (1 << 5)
1050 #define NHM_PF_IFETCH (1 << 6)
1051 #define NHM_OFFCORE_OTHER (1 << 7)
1052 #define NHM_UNCORE_HIT (1 << 8)
1053 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1054 #define NHM_OTHER_CORE_HITM (1 << 10)
1056 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1057 #define NHM_REMOTE_DRAM (1 << 13)
1058 #define NHM_LOCAL_DRAM (1 << 14)
1059 #define NHM_NON_DRAM (1 << 15)
1061 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1062 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1064 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1065 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1066 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1068 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1069 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1070 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
1072 static __initconst const u64 nehalem_hw_cache_extra_regs
1073 [PERF_COUNT_HW_CACHE_MAX]
1074 [PERF_COUNT_HW_CACHE_OP_MAX]
1075 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1079 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1080 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1083 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1084 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1086 [ C(OP_PREFETCH) ] = {
1087 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1088 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1093 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1094 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1097 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1098 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1100 [ C(OP_PREFETCH) ] = {
1101 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1102 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1107 static __initconst const u64 nehalem_hw_cache_event_ids
1108 [PERF_COUNT_HW_CACHE_MAX]
1109 [PERF_COUNT_HW_CACHE_OP_MAX]
1110 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1114 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1115 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1118	[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1119 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1121 [ C(OP_PREFETCH) ] = {
1122 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1123 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1128 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1129 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1132 [ C(RESULT_ACCESS) ] = -1,
1133 [ C(RESULT_MISS) ] = -1,
1135 [ C(OP_PREFETCH) ] = {
1136 [ C(RESULT_ACCESS) ] = 0x0,
1137 [ C(RESULT_MISS) ] = 0x0,
1142 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1143 [ C(RESULT_ACCESS) ] = 0x01b7,
1144 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1145 [ C(RESULT_MISS) ] = 0x01b7,
1148	 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1152 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1153 [ C(RESULT_ACCESS) ] = 0x01b7,
1154 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1155 [ C(RESULT_MISS) ] = 0x01b7,
1157 [ C(OP_PREFETCH) ] = {
1158 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1159 [ C(RESULT_ACCESS) ] = 0x01b7,
1160 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1161 [ C(RESULT_MISS) ] = 0x01b7,
1166 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1167 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1170 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1171 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1173 [ C(OP_PREFETCH) ] = {
1174 [ C(RESULT_ACCESS) ] = 0x0,
1175 [ C(RESULT_MISS) ] = 0x0,
1180 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1181 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1184 [ C(RESULT_ACCESS) ] = -1,
1185 [ C(RESULT_MISS) ] = -1,
1187 [ C(OP_PREFETCH) ] = {
1188 [ C(RESULT_ACCESS) ] = -1,
1189 [ C(RESULT_MISS) ] = -1,
1194 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1195 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1198 [ C(RESULT_ACCESS) ] = -1,
1199 [ C(RESULT_MISS) ] = -1,
1201 [ C(OP_PREFETCH) ] = {
1202 [ C(RESULT_ACCESS) ] = -1,
1203 [ C(RESULT_MISS) ] = -1,
1208 [ C(RESULT_ACCESS) ] = 0x01b7,
1209 [ C(RESULT_MISS) ] = 0x01b7,
1212 [ C(RESULT_ACCESS) ] = 0x01b7,
1213 [ C(RESULT_MISS) ] = 0x01b7,
1215 [ C(OP_PREFETCH) ] = {
1216 [ C(RESULT_ACCESS) ] = 0x01b7,
1217 [ C(RESULT_MISS) ] = 0x01b7,
1222 static __initconst const u64 core2_hw_cache_event_ids
1223 [PERF_COUNT_HW_CACHE_MAX]
1224 [PERF_COUNT_HW_CACHE_OP_MAX]
1225 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1229 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1230 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1233 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1234 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1236 [ C(OP_PREFETCH) ] = {
1237 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1238 [ C(RESULT_MISS) ] = 0,
1243 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1244 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1247 [ C(RESULT_ACCESS) ] = -1,
1248 [ C(RESULT_MISS) ] = -1,
1250 [ C(OP_PREFETCH) ] = {
1251 [ C(RESULT_ACCESS) ] = 0,
1252 [ C(RESULT_MISS) ] = 0,
1257 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1258 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1261 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1262 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1264 [ C(OP_PREFETCH) ] = {
1265 [ C(RESULT_ACCESS) ] = 0,
1266 [ C(RESULT_MISS) ] = 0,
1271 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1272 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1275 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1276 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1278 [ C(OP_PREFETCH) ] = {
1279 [ C(RESULT_ACCESS) ] = 0,
1280 [ C(RESULT_MISS) ] = 0,
1285 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1286 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1289 [ C(RESULT_ACCESS) ] = -1,
1290 [ C(RESULT_MISS) ] = -1,
1292 [ C(OP_PREFETCH) ] = {
1293 [ C(RESULT_ACCESS) ] = -1,
1294 [ C(RESULT_MISS) ] = -1,
1299 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1300 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1303 [ C(RESULT_ACCESS) ] = -1,
1304 [ C(RESULT_MISS) ] = -1,
1306 [ C(OP_PREFETCH) ] = {
1307 [ C(RESULT_ACCESS) ] = -1,
1308 [ C(RESULT_MISS) ] = -1,
1313 static __initconst const u64 atom_hw_cache_event_ids
1314 [PERF_COUNT_HW_CACHE_MAX]
1315 [PERF_COUNT_HW_CACHE_OP_MAX]
1316 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1320 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1321 [ C(RESULT_MISS) ] = 0,
1324 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1325 [ C(RESULT_MISS) ] = 0,
1327 [ C(OP_PREFETCH) ] = {
1328 [ C(RESULT_ACCESS) ] = 0x0,
1329 [ C(RESULT_MISS) ] = 0,
1334 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1335 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1338 [ C(RESULT_ACCESS) ] = -1,
1339 [ C(RESULT_MISS) ] = -1,
1341 [ C(OP_PREFETCH) ] = {
1342 [ C(RESULT_ACCESS) ] = 0,
1343 [ C(RESULT_MISS) ] = 0,
1348 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1349 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1352 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1353 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1355 [ C(OP_PREFETCH) ] = {
1356 [ C(RESULT_ACCESS) ] = 0,
1357 [ C(RESULT_MISS) ] = 0,
1362 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1363 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1366 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1367 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1369 [ C(OP_PREFETCH) ] = {
1370 [ C(RESULT_ACCESS) ] = 0,
1371 [ C(RESULT_MISS) ] = 0,
1376 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1377 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1380 [ C(RESULT_ACCESS) ] = -1,
1381 [ C(RESULT_MISS) ] = -1,
1383 [ C(OP_PREFETCH) ] = {
1384 [ C(RESULT_ACCESS) ] = -1,
1385 [ C(RESULT_MISS) ] = -1,
1390 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1391 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1394 [ C(RESULT_ACCESS) ] = -1,
1395 [ C(RESULT_MISS) ] = -1,
1397 [ C(OP_PREFETCH) ] = {
1398 [ C(RESULT_ACCESS) ] = -1,
1399 [ C(RESULT_MISS) ] = -1,
1404 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1405 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1406 /* no_alloc_cycles.not_delivered */
1407 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1408 "event=0xca,umask=0x50");
1409 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1410 /* uops_retired.all */
1411 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1412 "event=0xc2,umask=0x10");
1413 /* uops_retired.all */
1414 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1415 "event=0xc2,umask=0x10");
1417 static struct attribute *slm_events_attrs[] = {
1418 EVENT_PTR(td_total_slots_slm),
1419 EVENT_PTR(td_total_slots_scale_slm),
1420 EVENT_PTR(td_fetch_bubbles_slm),
1421 EVENT_PTR(td_fetch_bubbles_scale_slm),
1422 EVENT_PTR(td_slots_issued_slm),
1423 EVENT_PTR(td_slots_retired_slm),
1427 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1429 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1430 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1431 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1435 #define SLM_DMND_READ SNB_DMND_DATA_RD
1436 #define SLM_DMND_WRITE SNB_DMND_RFO
1437 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1439 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1440 #define SLM_LLC_ACCESS SNB_RESP_ANY
1441 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1443 static __initconst const u64 slm_hw_cache_extra_regs
1444 [PERF_COUNT_HW_CACHE_MAX]
1445 [PERF_COUNT_HW_CACHE_OP_MAX]
1446 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1450 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1451 [ C(RESULT_MISS) ] = 0,
1454 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1455 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1457 [ C(OP_PREFETCH) ] = {
1458 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1459 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1464 static __initconst const u64 slm_hw_cache_event_ids
1465 [PERF_COUNT_HW_CACHE_MAX]
1466 [PERF_COUNT_HW_CACHE_OP_MAX]
1467 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1471 [ C(RESULT_ACCESS) ] = 0,
1472 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1475 [ C(RESULT_ACCESS) ] = 0,
1476 [ C(RESULT_MISS) ] = 0,
1478 [ C(OP_PREFETCH) ] = {
1479 [ C(RESULT_ACCESS) ] = 0,
1480 [ C(RESULT_MISS) ] = 0,
1485 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1486	[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1489 [ C(RESULT_ACCESS) ] = -1,
1490 [ C(RESULT_MISS) ] = -1,
1492 [ C(OP_PREFETCH) ] = {
1493 [ C(RESULT_ACCESS) ] = 0,
1494 [ C(RESULT_MISS) ] = 0,
1499 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1500 [ C(RESULT_ACCESS) ] = 0x01b7,
1501 [ C(RESULT_MISS) ] = 0,
1504 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1505 [ C(RESULT_ACCESS) ] = 0x01b7,
1506 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1507 [ C(RESULT_MISS) ] = 0x01b7,
1509 [ C(OP_PREFETCH) ] = {
1510 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1511 [ C(RESULT_ACCESS) ] = 0x01b7,
1512 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1513 [ C(RESULT_MISS) ] = 0x01b7,
1518 [ C(RESULT_ACCESS) ] = 0,
1519 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1522 [ C(RESULT_ACCESS) ] = 0,
1523 [ C(RESULT_MISS) ] = 0,
1525 [ C(OP_PREFETCH) ] = {
1526 [ C(RESULT_ACCESS) ] = 0,
1527 [ C(RESULT_MISS) ] = 0,
1532 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1533 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1536 [ C(RESULT_ACCESS) ] = -1,
1537 [ C(RESULT_MISS) ] = -1,
1539 [ C(OP_PREFETCH) ] = {
1540 [ C(RESULT_ACCESS) ] = -1,
1541 [ C(RESULT_MISS) ] = -1,
1546 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1547 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1550 [ C(RESULT_ACCESS) ] = -1,
1551 [ C(RESULT_MISS) ] = -1,
1553 [ C(OP_PREFETCH) ] = {
1554 [ C(RESULT_ACCESS) ] = -1,
1555 [ C(RESULT_MISS) ] = -1,
1560 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1561 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1562 /* UOPS_NOT_DELIVERED.ANY */
1563 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1564 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1565 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1566 /* UOPS_RETIRED.ANY */
1567 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1568 /* UOPS_ISSUED.ANY */
1569 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1571 static struct attribute *glm_events_attrs[] = {
1572 EVENT_PTR(td_total_slots_glm),
1573 EVENT_PTR(td_total_slots_scale_glm),
1574 EVENT_PTR(td_fetch_bubbles_glm),
1575 EVENT_PTR(td_recovery_bubbles_glm),
1576 EVENT_PTR(td_slots_issued_glm),
1577 EVENT_PTR(td_slots_retired_glm),
1581 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1582 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1583 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1584 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1588 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1589 #define GLM_DEMAND_RFO BIT_ULL(1)
1590 #define GLM_ANY_RESPONSE BIT_ULL(16)
1591 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1592 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1593 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1594 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1595 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1596 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1597 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1599 static __initconst const u64 glm_hw_cache_event_ids
1600 [PERF_COUNT_HW_CACHE_MAX]
1601 [PERF_COUNT_HW_CACHE_OP_MAX]
1602 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1605 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1606 [C(RESULT_MISS)] = 0x0,
1609 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1610 [C(RESULT_MISS)] = 0x0,
1612 [C(OP_PREFETCH)] = {
1613 [C(RESULT_ACCESS)] = 0x0,
1614 [C(RESULT_MISS)] = 0x0,
1619 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1620 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1623 [C(RESULT_ACCESS)] = -1,
1624 [C(RESULT_MISS)] = -1,
1626 [C(OP_PREFETCH)] = {
1627 [C(RESULT_ACCESS)] = 0x0,
1628 [C(RESULT_MISS)] = 0x0,
1633 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1634 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1637 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1638 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1640 [C(OP_PREFETCH)] = {
1641 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1642 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1647 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1648 [C(RESULT_MISS)] = 0x0,
1651 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1652 [C(RESULT_MISS)] = 0x0,
1654 [C(OP_PREFETCH)] = {
1655 [C(RESULT_ACCESS)] = 0x0,
1656 [C(RESULT_MISS)] = 0x0,
1661 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1662 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1665 [C(RESULT_ACCESS)] = -1,
1666 [C(RESULT_MISS)] = -1,
1668 [C(OP_PREFETCH)] = {
1669 [C(RESULT_ACCESS)] = -1,
1670 [C(RESULT_MISS)] = -1,
1675 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1676 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1679 [C(RESULT_ACCESS)] = -1,
1680 [C(RESULT_MISS)] = -1,
1682 [C(OP_PREFETCH)] = {
1683 [C(RESULT_ACCESS)] = -1,
1684 [C(RESULT_MISS)] = -1,
1689 static __initconst const u64 glm_hw_cache_extra_regs
1690 [PERF_COUNT_HW_CACHE_MAX]
1691 [PERF_COUNT_HW_CACHE_OP_MAX]
1692 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1695 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1697 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1701 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1703 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1706 [C(OP_PREFETCH)] = {
1707 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1709 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1715 static __initconst const u64 glp_hw_cache_event_ids
1716 [PERF_COUNT_HW_CACHE_MAX]
1717 [PERF_COUNT_HW_CACHE_OP_MAX]
1718 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1721 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1722 [C(RESULT_MISS)] = 0x0,
1725 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1726 [C(RESULT_MISS)] = 0x0,
1728 [C(OP_PREFETCH)] = {
1729 [C(RESULT_ACCESS)] = 0x0,
1730 [C(RESULT_MISS)] = 0x0,
1735 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1736 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1739 [C(RESULT_ACCESS)] = -1,
1740 [C(RESULT_MISS)] = -1,
1742 [C(OP_PREFETCH)] = {
1743 [C(RESULT_ACCESS)] = 0x0,
1744 [C(RESULT_MISS)] = 0x0,
1749 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1750 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1753 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1754 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1756 [C(OP_PREFETCH)] = {
1757 [C(RESULT_ACCESS)] = 0x0,
1758 [C(RESULT_MISS)] = 0x0,
1763 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1764 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1767 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1768 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1770 [C(OP_PREFETCH)] = {
1771 [C(RESULT_ACCESS)] = 0x0,
1772 [C(RESULT_MISS)] = 0x0,
1777 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1778 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1781 [C(RESULT_ACCESS)] = -1,
1782 [C(RESULT_MISS)] = -1,
1784 [C(OP_PREFETCH)] = {
1785 [C(RESULT_ACCESS)] = -1,
1786 [C(RESULT_MISS)] = -1,
1791 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1792 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1795 [C(RESULT_ACCESS)] = -1,
1796 [C(RESULT_MISS)] = -1,
1798 [C(OP_PREFETCH)] = {
1799 [C(RESULT_ACCESS)] = -1,
1800 [C(RESULT_MISS)] = -1,
1805 static __initconst const u64 glp_hw_cache_extra_regs
1806 [PERF_COUNT_HW_CACHE_MAX]
1807 [PERF_COUNT_HW_CACHE_OP_MAX]
1808 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1811 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1813 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1817 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1819 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1822 [C(OP_PREFETCH)] = {
1823 [C(RESULT_ACCESS)] = 0x0,
1824 [C(RESULT_MISS)] = 0x0,
1829 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
1830 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
1831 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
1832 #define KNL_MCDRAM_FAR BIT_ULL(22)
1833 #define KNL_DDR_LOCAL BIT_ULL(23)
1834 #define KNL_DDR_FAR BIT_ULL(24)
1835 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1836 KNL_DDR_LOCAL | KNL_DDR_FAR)
1837 #define KNL_L2_READ SLM_DMND_READ
1838 #define KNL_L2_WRITE SLM_DMND_WRITE
1839 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
1840 #define KNL_L2_ACCESS SLM_LLC_ACCESS
1841 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1842 KNL_DRAM_ANY | SNB_SNP_ANY | \
1845 static __initconst const u64 knl_hw_cache_extra_regs
1846 [PERF_COUNT_HW_CACHE_MAX]
1847 [PERF_COUNT_HW_CACHE_OP_MAX]
1848 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1851 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1852 [C(RESULT_MISS)] = 0,
1855 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1856 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
1858 [C(OP_PREFETCH)] = {
1859 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1860 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
1866 * Used from PMIs where the LBRs are already disabled.
1868 * This function may be called consecutively; the PMU must remain in the
1869 * disabled state across such consecutive calls.
1871 * During consecutive calls, the same disable value will be written to related
1872 * registers, so the PMU state remains unchanged.
1874 * intel_bts events don't coexist with intel PMU's BTS events because of
1875 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1876 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1878 static void __intel_pmu_disable_all(void)
1880 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1882 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
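	/* A single write to GLOBAL_CTRL stops all general-purpose and fixed counters at once. */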
1884 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1885 intel_pmu_disable_bts();
1887 intel_pmu_pebs_disable_all();
1890 static void intel_pmu_disable_all(void)
1892 __intel_pmu_disable_all();
1893 intel_pmu_lbr_disable_all();
1896 static void __intel_pmu_enable_all(int added, bool pmi)
1898 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1900 intel_pmu_pebs_enable_all();
1901 intel_pmu_lbr_enable_all(pmi);
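	/* Re-enable all host-owned counters; bits in intel_ctrl_guest_mask stay off for the guest. */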
1902 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1903 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1905 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1906 struct perf_event *event =
1907 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1909 if (WARN_ON_ONCE(!event))
1912 intel_pmu_enable_bts(event->hw.config);
1916 static void intel_pmu_enable_all(int added)
1918 __intel_pmu_enable_all(added, false);
1923 * Intel Errata AAK100 (model 26)
1924 * Intel Errata AAP53 (model 30)
1925 * Intel Errata BD53 (model 44)
1927 * The official story:
1928 * These chips need to be 'reset' when adding counters by programming the
1929 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1930 * in sequence on the same PMC or on different PMCs.
1932 * In practice it appears some of these events do in fact count, and
1933 * we need to program all 4 events.
1935 static void intel_pmu_nhm_workaround(void)
1937 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1938 static const unsigned long nhm_magic[4] = {
1944 struct perf_event *event;
1948	 * The erratum requires the following steps:
1949 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1950 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1951 * the corresponding PMCx;
1952 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
1953 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1954	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
1958	 * The steps we actually take differ slightly from the above:
1959	 * A) To reduce MSR operations, we don't run step 1) since those MSRs
1960	 *    are already cleared before this function is called;
1961	 * B) Call x86_perf_event_update to save PMCx before configuring
1962	 *    PERFEVTSELx with the magic numbers;
1963	 * C) For step 5), we clear PERFEVTSELx only when it is not
1964	 *    currently in use;
1965 * D) Call x86_perf_event_set_period to restore PMCx;
1968	/* We always operate on 4 pairs of PERF counters */
1969 for (i = 0; i < 4; i++) {
1970 event = cpuc->events[i];
1972 x86_perf_event_update(event);
1975 for (i = 0; i < 4; i++) {
1976 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1977 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
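	/* Momentarily enable PMC0-3 (bits 0-3 of GLOBAL_CTRL) so the magic events take effect, then disable again (steps 3 and 4). */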
1980 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1981 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1983 for (i = 0; i < 4; i++) {
1984 event = cpuc->events[i];
1987 x86_perf_event_set_period(event);
1988 __x86_pmu_enable_event(&event->hw,
1989 ARCH_PERFMON_EVENTSEL_ENABLE);
1991 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1995 static void intel_pmu_nhm_enable_all(int added)
1998 intel_pmu_nhm_workaround();
1999 intel_pmu_enable_all(added);
2002 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2004 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
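	/* tfa_shadow caches the last value written, so repeated calls with the same state skip the WRMSR. */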
2006 if (cpuc->tfa_shadow != val) {
2007 cpuc->tfa_shadow = val;
2008 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2012 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2015	 * We're going to use PMC3; make sure TFA is set before we touch it.
2017 if (cntr == 3 && !cpuc->is_fake)
2018 intel_set_tfa(cpuc, true);
2021 static void intel_tfa_pmu_enable_all(int added)
2023 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2026	 * If we find PMC3 is no longer used when we enable the PMU, we can clear TFA.
2029 if (!test_bit(3, cpuc->active_mask))
2030 intel_set_tfa(cpuc, false);
2032 intel_pmu_enable_all(added);
2035 static void enable_counter_freeze(void)
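	/*
	 * Setting the Freeze-PerfMon-on-PMI bit in DEBUGCTL makes the hardware
	 * freeze the counters automatically when a PMI is raised, so the PMI
	 * handler does not have to disable them itself.
	 */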
2037 update_debugctlmsr(get_debugctlmsr() |
2038 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2041 static void disable_counter_freeze(void)
2043 update_debugctlmsr(get_debugctlmsr() &
2044 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2047 static inline u64 intel_pmu_get_status(void)
2051 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2056 static inline void intel_pmu_ack_status(u64 ack)
2058 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2061 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2063 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
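	/* Each fixed counter owns a 4-bit control field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL; clear ours to disable it. */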
2066 mask = 0xfULL << (idx * 4);
2068 rdmsrl(hwc->config_base, ctrl_val);
2070 wrmsrl(hwc->config_base, ctrl_val);
2073 static inline bool event_is_checkpointed(struct perf_event *event)
2075 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2078 static void intel_pmu_disable_event(struct perf_event *event)
2080 struct hw_perf_event *hwc = &event->hw;
2081 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2083 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2084 intel_pmu_disable_bts();
2085 intel_pmu_drain_bts_buffer();
2089 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2090 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2091 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2093 if (unlikely(event->attr.precise_ip))
2094 intel_pmu_pebs_disable(event);
2096 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2097 intel_pmu_disable_fixed(hwc);
2101 x86_pmu_disable_event(event);
2104 static void intel_pmu_del_event(struct perf_event *event)
2106 if (needs_branch_stack(event))
2107 intel_pmu_lbr_del(event);
2108 if (event->attr.precise_ip)
2109 intel_pmu_pebs_del(event);
2112 static void intel_pmu_read_event(struct perf_event *event)
2114 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2115 intel_pmu_auto_reload_read(event);
2117 x86_perf_event_update(event);
2120 static void intel_pmu_enable_fixed(struct perf_event *event)
2122 struct hw_perf_event *hwc = &event->hw;
2123 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2124 u64 ctrl_val, mask, bits = 0;
2127 * Enable IRQ generation (0x8), if not PEBS,
2128 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2131 if (!event->attr.precise_ip)
2133 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2135 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2139 * ANY bit is supported in v3 and up
2141 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2145 mask = 0xfULL << (idx * 4);
2147 rdmsrl(hwc->config_base, ctrl_val);
2150 wrmsrl(hwc->config_base, ctrl_val);
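/*
 * Worked example (illustrative only, values per the rules above): for
 * fixed counter 1 counting in ring 0 and ring 3 with PMI on overflow,
 * bits = 0x8 | 0x2 | 0x1 = 0xb and mask = 0xf << 4, so the 4-bit field
 * at bits 4-7 of MSR_ARCH_PERFMON_FIXED_CTR_CTRL ends up as 0xb.
 */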
2153 static void intel_pmu_enable_event(struct perf_event *event)
2155 struct hw_perf_event *hwc = &event->hw;
2156 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2158 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2159 if (!__this_cpu_read(cpu_hw_events.enabled))
2162 intel_pmu_enable_bts(hwc->config);
2166 if (event->attr.exclude_host)
2167 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2168 if (event->attr.exclude_guest)
2169 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2171 if (unlikely(event_is_checkpointed(event)))
2172 cpuc->intel_cp_status |= (1ull << hwc->idx);
2174 if (unlikely(event->attr.precise_ip))
2175 intel_pmu_pebs_enable(event);
2177 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2178 intel_pmu_enable_fixed(event);
2182 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2185 static void intel_pmu_add_event(struct perf_event *event)
2187 if (event->attr.precise_ip)
2188 intel_pmu_pebs_add(event);
2189 if (needs_branch_stack(event))
2190 intel_pmu_lbr_add(event);
2194 * Save and restart an expired event. Called by NMI contexts,
2195 * so it has to be careful about preempting normal event ops:
2197 int intel_pmu_save_and_restart(struct perf_event *event)
2199 x86_perf_event_update(event);
2201 * For a checkpointed counter always reset back to 0. This
2202 * avoids a situation where the counter overflows, aborts the
2203 * transaction and is then set back to shortly before the
2204 * overflow, and overflows and aborts again.
2206 if (unlikely(event_is_checkpointed(event))) {
2207 /* No race with NMIs because the counter should not be armed */
2208 wrmsrl(event->hw.event_base, 0);
2209 local64_set(&event->hw.prev_count, 0);
2211 return x86_perf_event_set_period(event);
2214 static void intel_pmu_reset(void)
2216 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2217 unsigned long flags;
2220 if (!x86_pmu.num_counters)
2223 local_irq_save(flags);
2225 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2227 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2228 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2229 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2231 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2232 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2235 ds->bts_index = ds->bts_buffer_base;
2237 /* Ack all overflows and disable fixed counters */
2238 if (x86_pmu.version >= 2) {
2239 intel_pmu_ack_status(intel_pmu_get_status());
2240 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2243 /* Reset LBRs and LBR freezing */
2244 if (x86_pmu.lbr_nr) {
2245 update_debugctlmsr(get_debugctlmsr() &
2246 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2249 local_irq_restore(flags);
2252 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2254 struct perf_sample_data data;
2255 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2259 inc_irq_stat(apic_perf_irqs);
2262 * Ignore a range of extra bits in status that do not indicate
2263 * overflow by themselves.
2265 status &= ~(GLOBAL_STATUS_COND_CHG |
2266 GLOBAL_STATUS_ASIF |
2267 GLOBAL_STATUS_LBRS_FROZEN);
2271 * In case multiple PEBS events are sampled at the same time,
2272 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2273 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2274 * having their bits set in the status register. This is a sign
2275 * that there was at least one PEBS record pending at the time
2276 * of the PMU interrupt. PEBS counters must only be processed
2277 * via the drain_pebs() calls and not via the regular sample
2278 * processing loop that comes after it in this function, otherwise
2279 * phony regular samples may be generated in the sampling buffer
2280 * not marked with the EXACT tag. Another possibility is to have
2281 * one PEBS event and at least one non-PEBS event which overflows
2282 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2283 * not be set, yet the overflow status bit for the PEBS counter will
2286 * To avoid this problem, we systematically ignore the PEBS-enabled
2287 * counters from the GLOBAL_STATUS mask and we always process PEBS
2288 * events via drain_pebs().
2290 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2291 status &= ~cpuc->pebs_enabled;
2293 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2296 * PEBS overflow sets bit 62 in the global status register
2298 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2300 x86_pmu.drain_pebs(regs);
2301 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2307 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2309 intel_pt_interrupt();
2313 * Checkpointed counters can lead to 'spurious' PMIs because the
2314 * rollback caused by the PMI will have cleared the overflow status
2315 * bit. Therefore always force probe these counters.
2317 status |= cpuc->intel_cp_status;
2319 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2320 struct perf_event *event = cpuc->events[bit];
2324 if (!test_bit(bit, cpuc->active_mask))
2327 if (!intel_pmu_save_and_restart(event))
2330 perf_sample_data_init(&data, 0, event->hw.last_period);
2332 if (has_branch_stack(event))
2333 data.br_stack = &cpuc->lbr_stack;
2335 if (perf_event_overflow(event, &data, regs))
2336 x86_pmu_stop(event, 0);
2342 static bool disable_counter_freezing = true;
2343 static int __init intel_perf_counter_freezing_setup(char *s)
2347 if (kstrtobool(s, &res))
2350 disable_counter_freezing = !res;
2353 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
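/*
 * Usage note (illustrative): with the default of disable_counter_freezing
 * being true, booting with "perf_v4_pmi=1" opts in to the counter-freezing
 * handler below, while "perf_v4_pmi=0" (or omitting the parameter) keeps
 * it disabled.
 */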
2356 * Simplified handler for Arch Perfmon v4:
2357 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2358 * This is done automatically on PMU ack.
2359 * - Ack the PMU only after the APIC.
2362 static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2364 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2368 int pmu_enabled = cpuc->enabled;
2371 /* PMU has been disabled because of counter freezing */
2373 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2375 intel_bts_disable_local();
2376 handled = intel_pmu_drain_bts_buffer();
2377 handled += intel_bts_interrupt();
2379 status = intel_pmu_get_status();
2383 intel_pmu_lbr_read();
2384 if (++loops > 100) {
2388 WARN(1, "perfevents: irq loop stuck!\n");
2389 perf_event_print_debug();
2397 handled += handle_pmi_common(regs, status);
2399 /* Ack the PMI in the APIC */
2400 apic_write(APIC_LVTPC, APIC_DM_NMI);
2403 * The counters start counting immediately while we ack the status.
2404 * Make it as close as possible to IRET. This avoids bogus
2405 * freezing on Skylake CPUs.
2408 intel_pmu_ack_status(status);
2411 * The CPU may issue two PMIs very close to each other.
2412 * When the PMI handler services the first one, the
2413 * GLOBAL_STATUS is already updated to reflect both.
2414 * When it IRETs, the second PMI is immediately
2415 * handled and sees a clear status. In the meantime,
2416 * there may be a third PMI, because the freezing bit
2417 * isn't set, since the ack happened in the first PMI handler.
2418 * Double check if there is more work to be done.
2420 status = intel_pmu_get_status();
2426 intel_bts_enable_local();
2427 cpuc->enabled = pmu_enabled;
2432 * This handler is triggered by the local APIC, so the APIC IRQ handling
2435 static int intel_pmu_handle_irq(struct pt_regs *regs)
2437 struct cpu_hw_events *cpuc;
2443 cpuc = this_cpu_ptr(&cpu_hw_events);
2446 * Save the PMU state.
2447 * It needs to be restored when leaving the handler.
2449 pmu_enabled = cpuc->enabled;
2451 * No known reason to not always do late ACK,
2452 * but just in case do it opt-in.
2454 if (!x86_pmu.late_ack)
2455 apic_write(APIC_LVTPC, APIC_DM_NMI);
2456 intel_bts_disable_local();
2458 __intel_pmu_disable_all();
2459 handled = intel_pmu_drain_bts_buffer();
2460 handled += intel_bts_interrupt();
2461 status = intel_pmu_get_status();
2467 intel_pmu_lbr_read();
2468 intel_pmu_ack_status(status);
2469 if (++loops > 100) {
2473 WARN(1, "perfevents: irq loop stuck!\n");
2474 perf_event_print_debug();
2481 handled += handle_pmi_common(regs, status);
2484 * Repeat if there is more work to be done:
2486 status = intel_pmu_get_status();
2491 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2492 cpuc->enabled = pmu_enabled;
2494 __intel_pmu_enable_all(0, true);
2495 intel_bts_enable_local();
2498 * Only unmask the NMI after the overflow counters
2499 * have been reset. This avoids spurious NMIs on
2502 if (x86_pmu.late_ack)
2503 apic_write(APIC_LVTPC, APIC_DM_NMI);
2507 static struct event_constraint *
2508 intel_bts_constraints(struct perf_event *event)
2510 if (unlikely(intel_pmu_has_bts(event)))
2511 return &bts_constraint;
2516 static int intel_alt_er(int idx, u64 config)
2520 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2523 if (idx == EXTRA_REG_RSP_0)
2524 alt_idx = EXTRA_REG_RSP_1;
2526 if (idx == EXTRA_REG_RSP_1)
2527 alt_idx = EXTRA_REG_RSP_0;
2529 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2535 static void intel_fixup_er(struct perf_event *event, int idx)
2537 event->hw.extra_reg.idx = idx;
2539 if (idx == EXTRA_REG_RSP_0) {
2540 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2541 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2542 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2543 } else if (idx == EXTRA_REG_RSP_1) {
2544 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2545 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2546 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
2551 * manage allocation of shared extra msr for certain events
2554 * per-cpu: to be shared between the various events on a single PMU
2555 * per-core: per-cpu + shared by HT threads
2557 static struct event_constraint *
2558 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2559 struct perf_event *event,
2560 struct hw_perf_event_extra *reg)
2562 struct event_constraint *c = &emptyconstraint;
2563 struct er_account *era;
2564 unsigned long flags;
2568 * reg->alloc can be set due to existing state, so for fake cpuc we
2569 * need to ignore this, otherwise we might fail to allocate proper fake
2570 * state for this extra reg constraint. Also see the comment below.
2572 if (reg->alloc && !cpuc->is_fake)
2573 return NULL; /* call x86_get_event_constraint() */
2576 era = &cpuc->shared_regs->regs[idx];
2578 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
2579 * passing a fake cpuc
2581 raw_spin_lock_irqsave(&era->lock, flags);
2583 if (!atomic_read(&era->ref) || era->config == reg->config) {
2586 * If it's a fake cpuc -- as per validate_{group,event}() we
2587 * shouldn't touch event state and we can avoid doing so
2588 * since both will only call get_event_constraints() once
2589 * on each event; this avoids the need for reg->alloc.
2591 * Not doing the ER fixup will only result in era->reg being
2592 * wrong, but since we won't actually try and program hardware
2593 * this isn't a problem either.
2595 if (!cpuc->is_fake) {
2596 if (idx != reg->idx)
2597 intel_fixup_er(event, idx);
2600 * x86_schedule_events() can call get_event_constraints()
2601 * multiple times on events in the case of incremental
2602 * scheduling. reg->alloc ensures we only do the ER
2608 /* lock in msr value */
2609 era->config = reg->config;
2610 era->reg = reg->reg;
2613 atomic_inc(&era->ref);
2616 * need to call x86_get_event_constraint()
2617 * to check if associated event has constraints
2621 idx = intel_alt_er(idx, reg->config);
2622 if (idx != reg->idx) {
2623 raw_spin_unlock_irqrestore(&era->lock, flags);
2627 raw_spin_unlock_irqrestore(&era->lock, flags);
2633 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2634 struct hw_perf_event_extra *reg)
2636 struct er_account *era;
2639 * Only put constraint if extra reg was actually allocated. Also takes
2640 * care of events which do not use an extra shared reg.
2642 * Also, if this is a fake cpuc we shouldn't touch any event state
2643 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2644 * either since it'll be thrown out.
2646 if (!reg->alloc || cpuc->is_fake)
2649 era = &cpuc->shared_regs->regs[reg->idx];
2651 /* one fewer user */
2652 atomic_dec(&era->ref);
2654 /* allocate again next time */
2658 static struct event_constraint *
2659 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2660 struct perf_event *event)
2662 struct event_constraint *c = NULL, *d;
2663 struct hw_perf_event_extra *xreg, *breg;
2665 xreg = &event->hw.extra_reg;
2666 if (xreg->idx != EXTRA_REG_NONE) {
2667 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2668 if (c == &emptyconstraint)
2671 breg = &event->hw.branch_reg;
2672 if (breg->idx != EXTRA_REG_NONE) {
2673 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2674 if (d == &emptyconstraint) {
2675 __intel_shared_reg_put_constraints(cpuc, xreg);
2682 struct event_constraint *
2683 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2684 struct perf_event *event)
2686 struct event_constraint *c;
2688 if (x86_pmu.event_constraints) {
2689 for_each_event_constraint(c, x86_pmu.event_constraints) {
2690 if ((event->hw.config & c->cmask) == c->code) {
2691 event->hw.flags |= c->flags;
2697 return &unconstrained;
2700 static struct event_constraint *
2701 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2702 struct perf_event *event)
2704 struct event_constraint *c;
2706 c = intel_bts_constraints(event);
2710 c = intel_shared_regs_constraints(cpuc, event);
2714 c = intel_pebs_constraints(event);
2718 return x86_get_event_constraints(cpuc, idx, event);
2722 intel_start_scheduling(struct cpu_hw_events *cpuc)
2724 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2725 struct intel_excl_states *xl;
2726 int tid = cpuc->excl_thread_id;
2729 * nothing needed if in group validation mode
2731 if (cpuc->is_fake || !is_ht_workaround_enabled())
2735 * no exclusion needed
2737 if (WARN_ON_ONCE(!excl_cntrs))
2740 xl = &excl_cntrs->states[tid];
2742 xl->sched_started = true;
2744 * Lock shared state until we are done scheduling,
2745 * i.e. until intel_stop_scheduling() runs; this
2746 * makes scheduling appear as a transaction.
2748 raw_spin_lock(&excl_cntrs->lock);
2751 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2753 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2754 struct event_constraint *c = cpuc->event_constraint[idx];
2755 struct intel_excl_states *xl;
2756 int tid = cpuc->excl_thread_id;
2758 if (cpuc->is_fake || !is_ht_workaround_enabled())
2761 if (WARN_ON_ONCE(!excl_cntrs))
2764 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2767 xl = &excl_cntrs->states[tid];
2769 lockdep_assert_held(&excl_cntrs->lock);
2771 if (c->flags & PERF_X86_EVENT_EXCL)
2772 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2774 xl->state[cntr] = INTEL_EXCL_SHARED;
2778 intel_stop_scheduling(struct cpu_hw_events *cpuc)
2780 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2781 struct intel_excl_states *xl;
2782 int tid = cpuc->excl_thread_id;
2785 * nothing needed if in group validation mode
2787 if (cpuc->is_fake || !is_ht_workaround_enabled())
2790 * no exclusion needed
2792 if (WARN_ON_ONCE(!excl_cntrs))
2795 xl = &excl_cntrs->states[tid];
2797 xl->sched_started = false;
2799 * release shared state lock (acquired in intel_start_scheduling())
2801 raw_spin_unlock(&excl_cntrs->lock);
2804 static struct event_constraint *
2805 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2807 WARN_ON_ONCE(!cpuc->constraint_list);
2809 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2810 struct event_constraint *cx;
2813 * grab pre-allocated constraint entry
2815 cx = &cpuc->constraint_list[idx];
2818 * initialize dynamic constraint
2819 * with static constraint
2824 * mark constraint as dynamic
2826 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2833 static struct event_constraint *
2834 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2835 int idx, struct event_constraint *c)
2837 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2838 struct intel_excl_states *xlo;
2839 int tid = cpuc->excl_thread_id;
2843 * validating a group does not require
2844 * enforcing cross-thread exclusion
2846 if (cpuc->is_fake || !is_ht_workaround_enabled())
2850 * no exclusion needed
2852 if (WARN_ON_ONCE(!excl_cntrs))
2856 * because we modify the constraint, we need
2857 * to make a copy. Static constraints come
2858 * from static const tables.
2860 * only needed when constraint has not yet
2861 * been cloned (marked dynamic)
2863 c = dyn_constraint(cpuc, c, idx);
2866 * From here on, the constraint is dynamic.
2867 * Either it was just allocated above, or it
2868 * was allocated during an earlier invocation
2873 * state of sibling HT
2875 xlo = &excl_cntrs->states[tid ^ 1];
2878 * event requires exclusive counter access
2881 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2882 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2883 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2884 if (!cpuc->n_excl++)
2885 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2889 * Modify static constraint with current dynamic
2892 * EXCLUSIVE: sibling counter measuring exclusive event
2893 * SHARED : sibling counter measuring non-exclusive event
2894 * UNUSED : sibling counter unused
2896 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2898 * exclusive event in sibling counter:
2899 * our corresponding counter cannot be used
2900 * regardless of our event
2902 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
2903 __clear_bit(i, c->idxmsk);
2905 * if measuring an exclusive event while the sibling is
2906 * measuring a non-exclusive one, then the counter cannot
2909 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
2910 __clear_bit(i, c->idxmsk);
2914 * recompute actual bit weight for scheduling algorithm
2916 c->weight = hweight64(c->idxmsk64);
2919 * if we return an empty mask, then switch
2920 * back to static empty constraint to avoid
2921 * the cost of freeing later on
2924 c = &emptyconstraint;
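/*
 * Worked example of the rules above (hypothetical sibling state): if the
 * sibling thread holds counter 0 in EXCLUSIVE state and counter 1 in
 * SHARED state, a non-exclusive event on this thread loses only counter 0
 * from its constraint mask, while an exclusive event loses counters 0
 * and 1.
 */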
2929 static struct event_constraint *
2930 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2931 struct perf_event *event)
2933 struct event_constraint *c1 = NULL;
2934 struct event_constraint *c2;
2936 if (idx >= 0) /* fake does < 0 */
2937 c1 = cpuc->event_constraint[idx];
2941 * - static constraint: no change across incremental scheduling calls
2942 * - dynamic constraint: handled by intel_get_excl_constraints()
2944 c2 = __intel_get_event_constraints(cpuc, idx, event);
2945 if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2946 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2947 c1->weight = c2->weight;
2951 if (cpuc->excl_cntrs)
2952 return intel_get_excl_constraints(cpuc, event, idx, c2);
2957 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2958 struct perf_event *event)
2960 struct hw_perf_event *hwc = &event->hw;
2961 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2962 int tid = cpuc->excl_thread_id;
2963 struct intel_excl_states *xl;
2966 * nothing needed if in group validation mode
2971 if (WARN_ON_ONCE(!excl_cntrs))
2974 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2975 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2976 if (!--cpuc->n_excl)
2977 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2981 * If event was actually assigned, then mark the counter state as
2984 if (hwc->idx >= 0) {
2985 xl = &excl_cntrs->states[tid];
2988 * put_constraint may be called from x86_schedule_events()
2989 * which already has the lock held so here make locking
2992 if (!xl->sched_started)
2993 raw_spin_lock(&excl_cntrs->lock);
2995 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
2997 if (!xl->sched_started)
2998 raw_spin_unlock(&excl_cntrs->lock);
3003 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3004 struct perf_event *event)
3006 struct hw_perf_event_extra *reg;
3008 reg = &event->hw.extra_reg;
3009 if (reg->idx != EXTRA_REG_NONE)
3010 __intel_shared_reg_put_constraints(cpuc, reg);
3012 reg = &event->hw.branch_reg;
3013 if (reg->idx != EXTRA_REG_NONE)
3014 __intel_shared_reg_put_constraints(cpuc, reg);
3017 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3018 struct perf_event *event)
3020 intel_put_shared_regs_event_constraints(cpuc, event);
3023 * if the PMU has exclusive counter restrictions, then
3024 * all events are subject to them and must call the
3025 * put_excl_constraints() routine
3027 if (cpuc->excl_cntrs)
3028 intel_put_excl_constraints(cpuc, event);
3031 static void intel_pebs_aliases_core2(struct perf_event *event)
3033 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3035 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3036 * (0x003c) so that we can use it with PEBS.
3038 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3039 * PEBS capable. However we can use INST_RETIRED.ANY_P
3040 * (0x00c0), which is a PEBS capable event, to get the same
3043 * INST_RETIRED.ANY_P counts the number of cycles that retire at least
3044 * CNTMASK instructions. By setting CNTMASK to a value (16)
3045 * larger than the maximum number of instructions that can be
3046 * retired per cycle (4) and then inverting the condition, we
3047 * count all cycles that retire 16 or fewer instructions, which
3050 * Thereby we gain a PEBS capable cycle counter.
3052 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3054 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3055 event->hw.config = alt_config;
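/*
 * For illustration, using the config layout described by the format
 * attributes later in this file (event in config:0-7, inv in config:23,
 * cmask in config:24-31), the alternative encoding above is
 *
 *	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) == 0x108000c0
 */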
3059 static void intel_pebs_aliases_snb(struct perf_event *event)
3061 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3063 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3064 * (0x003c) so that we can use it with PEBS.
3066 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3067 * PEBS capable. However we can use UOPS_RETIRED.ALL
3068 * (0x01c2), which is a PEBS capable event, to get the same
3071 * UOPS_RETIRED.ALL counts the number of cycles that retire at least
3072 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3073 * larger than the maximum number of micro-ops that can be
3074 * retired per cycle (4) and then inverting the condition, we
3075 * count all cycles that retire 16 or fewer micro-ops, which
3078 * Thereby we gain a PEBS capable cycle counter.
3080 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3082 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3083 event->hw.config = alt_config;
3087 static void intel_pebs_aliases_precdist(struct perf_event *event)
3089 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3091 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3092 * (0x003c) so that we can use it with PEBS.
3094 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3095 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3096 * (0x01c0), which is a PEBS capable event, to get the same
3099 * The PREC_DIST event has special support to minimize sample
3100 * shadowing effects. One drawback is that it can only be
3101 * programmed on counter 1, but that seems like an
3102 * acceptable trade-off.
3104 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3106 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3107 event->hw.config = alt_config;
3111 static void intel_pebs_aliases_ivb(struct perf_event *event)
3113 if (event->attr.precise_ip < 3)
3114 return intel_pebs_aliases_snb(event);
3115 return intel_pebs_aliases_precdist(event);
3118 static void intel_pebs_aliases_skl(struct perf_event *event)
3120 if (event->attr.precise_ip < 3)
3121 return intel_pebs_aliases_core2(event);
3122 return intel_pebs_aliases_precdist(event);
3125 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3127 unsigned long flags = x86_pmu.large_pebs_flags;
3129 if (event->attr.use_clockid)
3130 flags &= ~PERF_SAMPLE_TIME;
3131 if (!event->attr.exclude_kernel)
3132 flags &= ~PERF_SAMPLE_REGS_USER;
3133 if (event->attr.sample_regs_user & ~PEBS_REGS)
3134 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3138 static int intel_pmu_bts_config(struct perf_event *event)
3140 struct perf_event_attr *attr = &event->attr;
3142 if (unlikely(intel_pmu_has_bts(event))) {
3143 /* BTS is not supported by this architecture. */
3144 if (!x86_pmu.bts_active)
3147 /* BTS is currently only allowed for user-mode. */
3148 if (!attr->exclude_kernel)
3151 /* BTS is not allowed for precise events. */
3152 if (attr->precise_ip)
3155 /* disallow bts if conflicting events are present */
3156 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3159 event->destroy = hw_perf_lbr_event_destroy;
3165 static int core_pmu_hw_config(struct perf_event *event)
3167 int ret = x86_pmu_hw_config(event);
3172 return intel_pmu_bts_config(event);
3175 static int intel_pmu_hw_config(struct perf_event *event)
3177 int ret = x86_pmu_hw_config(event);
3182 ret = intel_pmu_bts_config(event);
3186 if (event->attr.precise_ip) {
3187 if (!event->attr.freq) {
3188 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3189 if (!(event->attr.sample_type &
3190 ~intel_pmu_large_pebs_flags(event)))
3191 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3193 if (x86_pmu.pebs_aliases)
3194 x86_pmu.pebs_aliases(event);
3196 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3197 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3200 if (needs_branch_stack(event)) {
3201 ret = intel_pmu_setup_lbr_filter(event);
3206 * BTS is set up earlier in this path, so don't account twice
3208 if (!unlikely(intel_pmu_has_bts(event))) {
3209 /* disallow lbr if conflicting events are present */
3210 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3213 event->destroy = hw_perf_lbr_event_destroy;
3217 if (event->attr.type != PERF_TYPE_RAW)
3220 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3223 if (x86_pmu.version < 3)
3226 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3229 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3234 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3236 if (x86_pmu.guest_get_msrs)
3237 return x86_pmu.guest_get_msrs(nr);
3241 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3243 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3245 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3246 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3248 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3249 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3250 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3252 * If a PMU counter has PEBS enabled, it is not enough to disable the
3253 * counter on guest entry, since a PEBS memory write can overshoot the
3254 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
3256 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3257 arr[1].host = cpuc->pebs_enabled;
3264 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3266 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3267 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3270 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3271 struct perf_event *event = cpuc->events[idx];
3273 arr[idx].msr = x86_pmu_config_addr(idx);
3274 arr[idx].host = arr[idx].guest = 0;
3276 if (!test_bit(idx, cpuc->active_mask))
3279 arr[idx].host = arr[idx].guest =
3280 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3282 if (event->attr.exclude_host)
3283 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3284 else if (event->attr.exclude_guest)
3285 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3288 *nr = x86_pmu.num_counters;
3292 static void core_pmu_enable_event(struct perf_event *event)
3294 if (!event->attr.exclude_host)
3295 x86_pmu_enable_event(event);
3298 static void core_pmu_enable_all(int added)
3300 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3303 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3304 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3306 if (!test_bit(idx, cpuc->active_mask) ||
3307 cpuc->events[idx]->attr.exclude_host)
3310 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3314 static int hsw_hw_config(struct perf_event *event)
3316 int ret = intel_pmu_hw_config(event);
3320 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3322 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3325 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3326 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3329 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3330 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3331 event->attr.precise_ip > 0))
3334 if (event_is_checkpointed(event)) {
3336 * Sampling of checkpointed events can cause situations where
3337 * the CPU constantly aborts because of an overflow, which is
3338 * then checkpointed back and ignored. Forbid checkpointing
3341 * But still allow a long sampling period, so that perf stat
3344 if (event->attr.sample_period > 0 &&
3345 event->attr.sample_period < 0x7fffffff)
3351 static struct event_constraint counter0_constraint =
3352 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3354 static struct event_constraint counter2_constraint =
3355 EVENT_CONSTRAINT(0, 0x4, 0);
3357 static struct event_constraint *
3358 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3359 struct perf_event *event)
3361 struct event_constraint *c;
3363 c = intel_get_event_constraints(cpuc, idx, event);
3365 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3366 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3367 if (c->idxmsk64 & (1U << 2))
3368 return &counter2_constraint;
3369 return &emptyconstraint;
3375 static struct event_constraint *
3376 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3377 struct perf_event *event)
3379 struct event_constraint *c;
3381 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3382 if (event->attr.precise_ip == 3)
3383 return &counter0_constraint;
3385 c = intel_get_event_constraints(cpuc, idx, event);
3390 static bool allow_tsx_force_abort = true;
3392 static struct event_constraint *
3393 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3394 struct perf_event *event)
3396 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3399 * Without TFA we must not use PMC3.
3401 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
3402 c = dyn_constraint(cpuc, c, idx);
3403 c->idxmsk64 &= ~(1ULL << 3);
3413 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3414 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3415 * the two to enforce a minimum period of 128 (the smallest value that has bits
3416 * 0-5 cleared and >= 100).
3418 * Because of how the code in x86_perf_event_set_period() works, the truncation
3419 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3420 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3422 * Therefore the effective (average) period matches the requested period,
3423 * despite coarser hardware granularity.
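 *
 * Illustrative arithmetic for the rule above: a requested period of 100 is
 * bumped to 128, while a requested period of 1000 is truncated to 960
 * (0x3c0); the difference is carried in period_left and made up on later
 * reloads, so the average period still matches the request.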
3425 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3427 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3428 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3436 PMU_FORMAT_ATTR(event, "config:0-7" );
3437 PMU_FORMAT_ATTR(umask, "config:8-15" );
3438 PMU_FORMAT_ATTR(edge, "config:18" );
3439 PMU_FORMAT_ATTR(pc, "config:19" );
3440 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3441 PMU_FORMAT_ATTR(inv, "config:23" );
3442 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3443 PMU_FORMAT_ATTR(in_tx, "config:32");
3444 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
3446 static struct attribute *intel_arch_formats_attr[] = {
3447 &format_attr_event.attr,
3448 &format_attr_umask.attr,
3449 &format_attr_edge.attr,
3450 &format_attr_pc.attr,
3451 &format_attr_inv.attr,
3452 &format_attr_cmask.attr,
3456 ssize_t intel_event_sysfs_show(char *page, u64 config)
3458 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3460 return x86_event_sysfs_show(page, config, event);
3463 static struct intel_shared_regs *allocate_shared_regs(int cpu)
3465 struct intel_shared_regs *regs;
3468 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3469 GFP_KERNEL, cpu_to_node(cpu));
3472 * initialize the locks to keep lockdep happy
3474 for (i = 0; i < EXTRA_REG_MAX; i++)
3475 raw_spin_lock_init(&regs->regs[i].lock);
3482 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3484 struct intel_excl_cntrs *c;
3486 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3487 GFP_KERNEL, cpu_to_node(cpu));
3489 raw_spin_lock_init(&c->lock);
3496 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3498 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3499 cpuc->shared_regs = allocate_shared_regs(cpu);
3500 if (!cpuc->shared_regs)
3504 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3505 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3507 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3508 if (!cpuc->constraint_list)
3509 goto err_shared_regs;
3512 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3513 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3514 if (!cpuc->excl_cntrs)
3515 goto err_constraint_list;
3517 cpuc->excl_thread_id = 0;
3522 err_constraint_list:
3523 kfree(cpuc->constraint_list);
3524 cpuc->constraint_list = NULL;
3527 kfree(cpuc->shared_regs);
3528 cpuc->shared_regs = NULL;
3534 static int intel_pmu_cpu_prepare(int cpu)
3536 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3539 static void flip_smm_bit(void *data)
3541 unsigned long set = *(unsigned long *)data;
3544 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3545 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3547 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3548 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3552 static void intel_pmu_cpu_starting(int cpu)
3554 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3555 int core_id = topology_core_id(cpu);
3558 init_debug_store_on_cpu(cpu);
3560 * Deal with CPUs that don't clear their LBRs on power-up.
3562 intel_pmu_lbr_reset();
3564 cpuc->lbr_sel = NULL;
3566 if (x86_pmu.version > 1)
3567 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3569 if (x86_pmu.counter_freezing)
3570 enable_counter_freeze();
3572 if (!cpuc->shared_regs)
3575 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3576 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3577 struct intel_shared_regs *pc;
3579 pc = per_cpu(cpu_hw_events, i).shared_regs;
3580 if (pc && pc->core_id == core_id) {
3581 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3582 cpuc->shared_regs = pc;
3586 cpuc->shared_regs->core_id = core_id;
3587 cpuc->shared_regs->refcnt++;
3590 if (x86_pmu.lbr_sel_map)
3591 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3593 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3594 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3595 struct cpu_hw_events *sibling;
3596 struct intel_excl_cntrs *c;
3598 sibling = &per_cpu(cpu_hw_events, i);
3599 c = sibling->excl_cntrs;
3600 if (c && c->core_id == core_id) {
3601 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3602 cpuc->excl_cntrs = c;
3603 if (!sibling->excl_thread_id)
3604 cpuc->excl_thread_id = 1;
3608 cpuc->excl_cntrs->core_id = core_id;
3609 cpuc->excl_cntrs->refcnt++;
3613 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3615 struct intel_excl_cntrs *c;
3617 c = cpuc->excl_cntrs;
3619 if (c->core_id == -1 || --c->refcnt == 0)
3621 cpuc->excl_cntrs = NULL;
3624 kfree(cpuc->constraint_list);
3625 cpuc->constraint_list = NULL;
3628 static void intel_pmu_cpu_dying(int cpu)
3630 fini_debug_store_on_cpu(cpu);
3632 if (x86_pmu.counter_freezing)
3633 disable_counter_freeze();
3636 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3638 struct intel_shared_regs *pc;
3640 pc = cpuc->shared_regs;
3642 if (pc->core_id == -1 || --pc->refcnt == 0)
3644 cpuc->shared_regs = NULL;
3647 free_excl_cntrs(cpuc);
3650 static void intel_pmu_cpu_dead(int cpu)
3652 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3655 static void intel_pmu_sched_task(struct perf_event_context *ctx,
3658 intel_pmu_pebs_sched_task(ctx, sched_in);
3659 intel_pmu_lbr_sched_task(ctx, sched_in);
3662 static int intel_pmu_check_period(struct perf_event *event, u64 value)
3664 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3667 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3669 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3671 PMU_FORMAT_ATTR(frontend, "config1:0-23");
3673 static struct attribute *intel_arch3_formats_attr[] = {
3674 &format_attr_event.attr,
3675 &format_attr_umask.attr,
3676 &format_attr_edge.attr,
3677 &format_attr_pc.attr,
3678 &format_attr_any.attr,
3679 &format_attr_inv.attr,
3680 &format_attr_cmask.attr,
3684 static struct attribute *hsw_format_attr[] = {
3685 &format_attr_in_tx.attr,
3686 &format_attr_in_tx_cp.attr,
3687 &format_attr_offcore_rsp.attr,
3688 &format_attr_ldlat.attr,
3692 static struct attribute *nhm_format_attr[] = {
3693 &format_attr_offcore_rsp.attr,
3694 &format_attr_ldlat.attr,
3698 static struct attribute *slm_format_attr[] = {
3699 &format_attr_offcore_rsp.attr,
3703 static struct attribute *skl_format_attr[] = {
3704 &format_attr_frontend.attr,
3708 static __initconst const struct x86_pmu core_pmu = {
3710 .handle_irq = x86_pmu_handle_irq,
3711 .disable_all = x86_pmu_disable_all,
3712 .enable_all = core_pmu_enable_all,
3713 .enable = core_pmu_enable_event,
3714 .disable = x86_pmu_disable_event,
3715 .hw_config = core_pmu_hw_config,
3716 .schedule_events = x86_schedule_events,
3717 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3718 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3719 .event_map = intel_pmu_event_map,
3720 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3722 .large_pebs_flags = LARGE_PEBS_FLAGS,
3725 * Intel PMCs cannot be accessed sanely above 32-bit width,
3726 * so we install an artificial 1<<31 period regardless of
3727 * the generic event period:
3729 .max_period = (1ULL<<31) - 1,
3730 .get_event_constraints = intel_get_event_constraints,
3731 .put_event_constraints = intel_put_event_constraints,
3732 .event_constraints = intel_core_event_constraints,
3733 .guest_get_msrs = core_guest_get_msrs,
3734 .format_attrs = intel_arch_formats_attr,
3735 .events_sysfs_show = intel_event_sysfs_show,
3738 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
3739 * together with PMU version 1 and thus be using core_pmu with
3740 * shared_regs. We need the following callbacks here to allocate
3743 .cpu_prepare = intel_pmu_cpu_prepare,
3744 .cpu_starting = intel_pmu_cpu_starting,
3745 .cpu_dying = intel_pmu_cpu_dying,
3746 .cpu_dead = intel_pmu_cpu_dead,
3748 .check_period = intel_pmu_check_period,
3751 static struct attribute *intel_pmu_attrs[];
3753 static __initconst const struct x86_pmu intel_pmu = {
3755 .handle_irq = intel_pmu_handle_irq,
3756 .disable_all = intel_pmu_disable_all,
3757 .enable_all = intel_pmu_enable_all,
3758 .enable = intel_pmu_enable_event,
3759 .disable = intel_pmu_disable_event,
3760 .add = intel_pmu_add_event,
3761 .del = intel_pmu_del_event,
3762 .read = intel_pmu_read_event,
3763 .hw_config = intel_pmu_hw_config,
3764 .schedule_events = x86_schedule_events,
3765 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3766 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3767 .event_map = intel_pmu_event_map,
3768 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3770 .large_pebs_flags = LARGE_PEBS_FLAGS,
3772 * Intel PMCs cannot be accessed sanely above 32-bit width,
3773 * so we install an artificial 1<<31 period regardless of
3774 * the generic event period:
3776 .max_period = (1ULL << 31) - 1,
3777 .get_event_constraints = intel_get_event_constraints,
3778 .put_event_constraints = intel_put_event_constraints,
3779 .pebs_aliases = intel_pebs_aliases_core2,
3781 .format_attrs = intel_arch3_formats_attr,
3782 .events_sysfs_show = intel_event_sysfs_show,
3784 .attrs = intel_pmu_attrs,
3786 .cpu_prepare = intel_pmu_cpu_prepare,
3787 .cpu_starting = intel_pmu_cpu_starting,
3788 .cpu_dying = intel_pmu_cpu_dying,
3789 .cpu_dead = intel_pmu_cpu_dead,
3791 .guest_get_msrs = intel_guest_get_msrs,
3792 .sched_task = intel_pmu_sched_task,
3794 .check_period = intel_pmu_check_period,
3797 static __init void intel_clovertown_quirk(void)
3800 * PEBS is unreliable due to:
3802 * AJ67 - PEBS may experience CPL leaks
3803 * AJ68 - PEBS PMI may be delayed by one event
3804 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3805 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3807 * AJ67 could be worked around by restricting the OS/USR flags.
3808 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3810 * AJ106 could possibly be worked around by not allowing LBR
3811 * usage from PEBS, including the fixup.
3812 * AJ68 could possibly be worked around by always programming
3813 * a pebs_event_reset[0] value and coping with the lost events.
3815 * But taken together it might just make sense to not enable PEBS on
3818 pr_warn("PEBS disabled due to CPU errata\n");
3820 x86_pmu.pebs_constraints = NULL;
3823 static int intel_snb_pebs_broken(int cpu)
3825 u32 rev = UINT_MAX; /* default to broken for unknown models */
3827 switch (cpu_data(cpu).x86_model) {
3828 case INTEL_FAM6_SANDYBRIDGE:
3832 case INTEL_FAM6_SANDYBRIDGE_X:
3833 switch (cpu_data(cpu).x86_stepping) {
3834 case 6: rev = 0x618; break;
3835 case 7: rev = 0x70c; break;
3839 return (cpu_data(cpu).microcode < rev);
3842 static void intel_snb_check_microcode(void)
3844 int pebs_broken = 0;
3847 for_each_online_cpu(cpu) {
3848 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
3852 if (pebs_broken == x86_pmu.pebs_broken)
3856 * Serialized by the microcode lock.
3858 if (x86_pmu.pebs_broken) {
3859 pr_info("PEBS enabled due to microcode update\n");
3860 x86_pmu.pebs_broken = 0;
3862 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
3863 x86_pmu.pebs_broken = 1;
3867 static bool is_lbr_from(unsigned long msr)
3869 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
3871 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
3875 * Under certain circumstances, accessing certain MSRs may cause a #GP.
3876 * This function tests whether the input MSR can be safely accessed.
3878 static bool check_msr(unsigned long msr, u64 mask)
3880 u64 val_old, val_new, val_tmp;
3883 * Read the current value, change it and read it back to see if it
3884 * matches; this is needed to detect certain hardware emulators
3885 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3887 if (rdmsrl_safe(msr, &val_old))
3891 * Only change the bits which can be updated by wrmsrl.
3893 val_tmp = val_old ^ mask;
3895 if (is_lbr_from(msr))
3896 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
3898 if (wrmsrl_safe(msr, val_tmp) ||
3899 rdmsrl_safe(msr, &val_new))
3903 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
3904 * should equal rdmsrl()'s even with the quirk.
3906 if (val_new != val_tmp)
3909 if (is_lbr_from(msr))
3910 val_old = lbr_from_signext_quirk_wr(val_old);
3912 /* At this point the MSR is known to be safely accessible.
3913 * Restore the old value and return.
3915 wrmsrl(msr, val_old);
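/*
 * Hypothetical usage sketch (the caller and MSR below are examples only,
 * not taken from this file): init code could probe an LBR MSR and fall
 * back gracefully, e.g.
 *
 *	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 */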
3920 static __init void intel_sandybridge_quirk(void)
3922 x86_pmu.check_microcode = intel_snb_check_microcode;
3924 intel_snb_check_microcode();
3928 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
3929 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
3930 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
3931 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
3932 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
3933 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
3934 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
3935 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
3938 static __init void intel_arch_events_quirk(void)
3942 /* disable events that are reported as not present by cpuid */
3943 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
3944 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
3945 pr_warn("CPUID marked event: \'%s\' unavailable\n",
3946 intel_arch_events_map[bit].name);
3950 static __init void intel_nehalem_quirk(void)
3952 union cpuid10_ebx ebx;
3954 ebx.full = x86_pmu.events_maskl;
3955 if (ebx.split.no_branch_misses_retired) {
3957 * Erratum AAJ80 detected, we work around it by using
3958 * the BR_MISP_EXEC.ANY event. This will over-count
3959 * branch-misses, but it's still much better than the
3960 * architectural event which is often completely bogus:
3962 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
3963 ebx.split.no_branch_misses_retired = 0;
3964 x86_pmu.events_maskl = ebx.full;
3965 pr_info("CPU erratum AAJ80 worked around\n");
3969 static bool intel_glp_counter_freezing_broken(int cpu)
3971 u32 rev = UINT_MAX; /* default to broken for unknown stepping */
3973 switch (cpu_data(cpu).x86_stepping) {
3982 return (cpu_data(cpu).microcode < rev);
3985 static __init void intel_glp_counter_freezing_quirk(void)
3987 /* Check if it's already disabled */
3988 if (disable_counter_freezing)
3992 * If the system starts with the wrong ucode, leave the
3993 * counter-freezing feature permanently disabled.
3995 if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) {
3996 pr_info("PMU counter freezing disabled due to CPU errata, "
3997 "please upgrade microcode\n");
3998 x86_pmu.counter_freezing = false;
3999 x86_pmu.handle_irq = intel_pmu_handle_irq;
4004 * enable software workaround for errata:
4009 * Only needed when HT is enabled. However detecting
4010 * if HT is enabled is difficult (model specific). So instead,
4011 * we enable the workaround during early boot, and verify whether
4012 * it is needed in a later initcall phase once we have valid
4013 * topology information to check if HT is actually enabled
4015 static __init void intel_ht_bug(void)
4017 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4019 x86_pmu.start_scheduling = intel_start_scheduling;
4020 x86_pmu.commit_scheduling = intel_commit_scheduling;
4021 x86_pmu.stop_scheduling = intel_stop_scheduling;
4024 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4025 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
4027 /* Haswell special events */
4028 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4029 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4030 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4031 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4032 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4033 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4034 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4035 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4036 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4037 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4038 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4039 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
4041 static struct attribute *hsw_events_attrs[] = {
4042 EVENT_PTR(td_slots_issued),
4043 EVENT_PTR(td_slots_retired),
4044 EVENT_PTR(td_fetch_bubbles),
4045 EVENT_PTR(td_total_slots),
4046 EVENT_PTR(td_total_slots_scale),
4047 EVENT_PTR(td_recovery_bubbles),
4048 EVENT_PTR(td_recovery_bubbles_scale),
4052 static struct attribute *hsw_mem_events_attrs[] = {
4053 EVENT_PTR(mem_ld_hsw),
4054 EVENT_PTR(mem_st_hsw),
4058 static struct attribute *hsw_tsx_events_attrs[] = {
4059 EVENT_PTR(tx_start),
4060 EVENT_PTR(tx_commit),
4061 EVENT_PTR(tx_abort),
4062 EVENT_PTR(tx_capacity),
4063 EVENT_PTR(tx_conflict),
4064 EVENT_PTR(el_start),
4065 EVENT_PTR(el_commit),
4066 EVENT_PTR(el_abort),
4067 EVENT_PTR(el_capacity),
4068 EVENT_PTR(el_conflict),
4069 EVENT_PTR(cycles_t),
4070 EVENT_PTR(cycles_ct),
4074 static ssize_t freeze_on_smi_show(struct device *cdev,
4075 struct device_attribute *attr,
4078 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4081 static DEFINE_MUTEX(freeze_on_smi_mutex);
4083 static ssize_t freeze_on_smi_store(struct device *cdev,
4084 struct device_attribute *attr,
4085 const char *buf, size_t count)
4090 ret = kstrtoul(buf, 0, &val);
4097 mutex_lock(&freeze_on_smi_mutex);
4099 if (x86_pmu.attr_freeze_on_smi == val)
4102 x86_pmu.attr_freeze_on_smi = val;
4105 on_each_cpu(flip_smm_bit, &val, 1);
4108 mutex_unlock(&freeze_on_smi_mutex);
4113 static DEVICE_ATTR_RW(freeze_on_smi);
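/*
 * Example (illustrative): this attribute is typically exposed through the
 * "cpu" PMU device, so
 *
 *	echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi
 *
 * sets DEBUGCTLMSR_FREEZE_IN_SMM on every online CPU via flip_smm_bit()
 * above.
 */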
4115 static ssize_t branches_show(struct device *cdev,
4116 struct device_attribute *attr,
4119 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4122 static DEVICE_ATTR_RO(branches);
4124 static struct attribute *lbr_attrs[] = {
4125 &dev_attr_branches.attr,
4129 static char pmu_name_str[30];
4131 static ssize_t pmu_name_show(struct device *cdev,
4132 struct device_attribute *attr,
4135 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4138 static DEVICE_ATTR_RO(pmu_name);
4140 static struct attribute *intel_pmu_caps_attrs[] = {
4141 &dev_attr_pmu_name.attr,
4145 DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
4147 static struct attribute *intel_pmu_attrs[] = {
4148 &dev_attr_freeze_on_smi.attr,
4149 NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
4153 static __init struct attribute **
4154 get_events_attrs(struct attribute **base,
4155 struct attribute **mem,
4156 struct attribute **tsx)
4158 struct attribute **attrs = base;
4159 struct attribute **old;
4161 if (mem && x86_pmu.pebs)
4162 attrs = merge_attr(attrs, mem);
4164 if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
4166 attrs = merge_attr(attrs, tsx);
4174 __init int intel_pmu_init(void)
4176 struct attribute **extra_attr = NULL;
4177 struct attribute **mem_attr = NULL;
4178 struct attribute **tsx_attr = NULL;
4179 struct attribute **to_free = NULL;
4180 union cpuid10_edx edx;
4181 union cpuid10_eax eax;
4182 union cpuid10_ebx ebx;
4183 struct event_constraint *c;
4184 unsigned int unused;
4185 struct extra_reg *er;
4189 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4190 switch (boot_cpu_data.x86) {
4192 return p6_pmu_init();
4194 return knc_pmu_init();
4196 return p4_pmu_init();
4202 * Check whether the Architectural PerfMon supports
4203 * Branch Misses Retired hw_event or not.
4205 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4206 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4209 version = eax.split.version_id;
4213 x86_pmu = intel_pmu;
4215 x86_pmu.version = version;
4216 x86_pmu.num_counters = eax.split.num_counters;
4217 x86_pmu.cntval_bits = eax.split.bit_width;
4218 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4220 x86_pmu.events_maskl = ebx.full;
4221 x86_pmu.events_mask_len = eax.split.mask_length;
4223 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4226 * Quirk: v2 perfmon does not report fixed-purpose events, so
4227 * assume at least 3 events, when not running in a hypervisor:
4230 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4232 x86_pmu.num_counters_fixed =
4233 max((int)edx.split.num_counters_fixed, assume);
4237 x86_pmu.counter_freezing = !disable_counter_freezing;
4239 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4242 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4243 x86_pmu.intel_cap.capabilities = capabilities;
4248 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4251 * Install the hw-cache-events table:
4253 switch (boot_cpu_data.x86_model) {
4254 case INTEL_FAM6_CORE_YONAH:
4255 pr_cont("Core events, ");
4259 case INTEL_FAM6_CORE2_MEROM:
4260 x86_add_quirk(intel_clovertown_quirk);
4261 case INTEL_FAM6_CORE2_MEROM_L:
4262 case INTEL_FAM6_CORE2_PENRYN:
4263 case INTEL_FAM6_CORE2_DUNNINGTON:
4264 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4265 sizeof(hw_cache_event_ids));
4267 intel_pmu_lbr_init_core();
4269 x86_pmu.event_constraints = intel_core2_event_constraints;
4270 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4271 pr_cont("Core2 events, ");
4275 case INTEL_FAM6_NEHALEM:
4276 case INTEL_FAM6_NEHALEM_EP:
4277 case INTEL_FAM6_NEHALEM_EX:
4278 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4279 sizeof(hw_cache_event_ids));
4280 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4281 sizeof(hw_cache_extra_regs));
4283 intel_pmu_lbr_init_nhm();
4285 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4286 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4287 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4288 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4290 mem_attr = nhm_mem_events_attrs;
4292 /* UOPS_ISSUED.STALLED_CYCLES */
4293 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4294 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4295 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4296 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4297 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4299 intel_pmu_pebs_data_source_nhm();
4300 x86_add_quirk(intel_nehalem_quirk);
4301 x86_pmu.pebs_no_tlb = 1;
4302 extra_attr = nhm_format_attr;
4304 pr_cont("Nehalem events, ");
4308 case INTEL_FAM6_ATOM_BONNELL:
4309 case INTEL_FAM6_ATOM_BONNELL_MID:
4310 case INTEL_FAM6_ATOM_SALTWELL:
4311 case INTEL_FAM6_ATOM_SALTWELL_MID:
4312 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4313 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4314 sizeof(hw_cache_event_ids));
4316 intel_pmu_lbr_init_atom();
4318 x86_pmu.event_constraints = intel_gen_event_constraints;
4319 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4320 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4321 pr_cont("Atom events, ");
4325 case INTEL_FAM6_ATOM_SILVERMONT:
4326 case INTEL_FAM6_ATOM_SILVERMONT_X:
4327 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4328 case INTEL_FAM6_ATOM_AIRMONT:
4329 case INTEL_FAM6_ATOM_AIRMONT_MID:
4330 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4331 sizeof(hw_cache_event_ids));
4332 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4333 sizeof(hw_cache_extra_regs));
4335 intel_pmu_lbr_init_slm();
4337 x86_pmu.event_constraints = intel_slm_event_constraints;
4338 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4339 x86_pmu.extra_regs = intel_slm_extra_regs;
4340 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4341 x86_pmu.cpu_events = slm_events_attrs;
4342 extra_attr = slm_format_attr;
4343 pr_cont("Silvermont events, ");
4344 name = "silvermont";
4347 case INTEL_FAM6_ATOM_GOLDMONT:
4348 case INTEL_FAM6_ATOM_GOLDMONT_X:
4349 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4350 sizeof(hw_cache_event_ids));
4351 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4352 sizeof(hw_cache_extra_regs));
4354 intel_pmu_lbr_init_skl();
4356 x86_pmu.event_constraints = intel_slm_event_constraints;
4357 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4358 x86_pmu.extra_regs = intel_glm_extra_regs;
4360 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4361 * for precise cycles.
4362 * :pp is identical to :ppp
4364 x86_pmu.pebs_aliases = NULL;
4365 x86_pmu.pebs_prec_dist = true;
4366 x86_pmu.lbr_pt_coexist = true;
4367 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4368 x86_pmu.cpu_events = glm_events_attrs;
4369 extra_attr = slm_format_attr;
4370 pr_cont("Goldmont events, ");
4374 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4375 x86_add_quirk(intel_glp_counter_freezing_quirk);
4376 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4377 sizeof(hw_cache_event_ids));
4378 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4379 sizeof(hw_cache_extra_regs));
4381 intel_pmu_lbr_init_skl();
4383 x86_pmu.event_constraints = intel_slm_event_constraints;
4384 x86_pmu.extra_regs = intel_glm_extra_regs;
4385 /*
4386 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4387 * for precise cycles.
4388 */
4389 x86_pmu.pebs_aliases = NULL;
4390 x86_pmu.pebs_prec_dist = true;
4391 x86_pmu.lbr_pt_coexist = true;
4392 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4393 x86_pmu.flags |= PMU_FL_PEBS_ALL;
4394 x86_pmu.get_event_constraints = glp_get_event_constraints;
4395 x86_pmu.cpu_events = glm_events_attrs;
4396 /* Goldmont Plus has 4-wide pipeline */
4397 event_attr_td_total_slots_scale_glm.event_str = "4";
4398 extra_attr = slm_format_attr;
4399 pr_cont("Goldmont plus events, ");
4400 name = "goldmont_plus";
4401 break;
4403 case INTEL_FAM6_WESTMERE:
4404 case INTEL_FAM6_WESTMERE_EP:
4405 case INTEL_FAM6_WESTMERE_EX:
4406 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4407 sizeof(hw_cache_event_ids));
4408 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4409 sizeof(hw_cache_extra_regs));
4411 intel_pmu_lbr_init_nhm();
4413 x86_pmu.event_constraints = intel_westmere_event_constraints;
4414 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4415 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4416 x86_pmu.extra_regs = intel_westmere_extra_regs;
4417 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4419 mem_attr = nhm_mem_events_attrs;
4421 /* UOPS_ISSUED.STALLED_CYCLES */
4422 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4423 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4424 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4425 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4426 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4428 intel_pmu_pebs_data_source_nhm();
4429 extra_attr = nhm_format_attr;
4430 pr_cont("Westmere events, ");
4431 name = "westmere";
4432 break;
4434 case INTEL_FAM6_SANDYBRIDGE:
4435 case INTEL_FAM6_SANDYBRIDGE_X:
4436 x86_add_quirk(intel_sandybridge_quirk);
4437 x86_add_quirk(intel_ht_bug);
4438 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4439 sizeof(hw_cache_event_ids));
4440 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4441 sizeof(hw_cache_extra_regs));
4443 intel_pmu_lbr_init_snb();
4445 x86_pmu.event_constraints = intel_snb_event_constraints;
4446 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4447 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4448 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4449 x86_pmu.extra_regs = intel_snbep_extra_regs;
4450 else
4451 x86_pmu.extra_regs = intel_snb_extra_regs;
4454 /* all extra regs are per-cpu when HT is on */
4455 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4456 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4458 x86_pmu.cpu_events = snb_events_attrs;
4459 mem_attr = snb_mem_events_attrs;
4461 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4462 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4463 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4464 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
4465 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4466 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4468 extra_attr = nhm_format_attr;
4470 pr_cont("SandyBridge events, ");
4471 name = "sandybridge";
4472 break;
4474 case INTEL_FAM6_IVYBRIDGE:
4475 case INTEL_FAM6_IVYBRIDGE_X:
4476 x86_add_quirk(intel_ht_bug);
4477 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4478 sizeof(hw_cache_event_ids));
4479 /* dTLB-load-misses on IVB is different than SNB */
4480 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4482 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4483 sizeof(hw_cache_extra_regs));
4485 intel_pmu_lbr_init_snb();
4487 x86_pmu.event_constraints = intel_ivb_event_constraints;
4488 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4489 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4490 x86_pmu.pebs_prec_dist = true;
4491 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4492 x86_pmu.extra_regs = intel_snbep_extra_regs;
4493 else
4494 x86_pmu.extra_regs = intel_snb_extra_regs;
4495 /* all extra regs are per-cpu when HT is on */
4496 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4497 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4499 x86_pmu.cpu_events = snb_events_attrs;
4500 mem_attr = snb_mem_events_attrs;
4502 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4503 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4504 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4506 extra_attr = nhm_format_attr;
4508 pr_cont("IvyBridge events, ");
4509 name = "ivybridge";
4510 break;
4513 case INTEL_FAM6_HASWELL_CORE:
4514 case INTEL_FAM6_HASWELL_X:
4515 case INTEL_FAM6_HASWELL_ULT:
4516 case INTEL_FAM6_HASWELL_GT3E:
4517 x86_add_quirk(intel_ht_bug);
4518 x86_pmu.late_ack = true;
4519 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4520 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4522 intel_pmu_lbr_init_hsw();
4524 x86_pmu.event_constraints = intel_hsw_event_constraints;
4525 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4526 x86_pmu.extra_regs = intel_snbep_extra_regs;
4527 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4528 x86_pmu.pebs_prec_dist = true;
4529 /* all extra regs are per-cpu when HT is on */
4530 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4531 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4533 x86_pmu.hw_config = hsw_hw_config;
4534 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4535 x86_pmu.cpu_events = hsw_events_attrs;
4536 x86_pmu.lbr_double_abort = true;
4537 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4538 hsw_format_attr : nhm_format_attr;
4539 mem_attr = hsw_mem_events_attrs;
4540 tsx_attr = hsw_tsx_events_attrs;
4541 pr_cont("Haswell events, ");
4542 name = "haswell";
4543 break;
4545 case INTEL_FAM6_BROADWELL_CORE:
4546 case INTEL_FAM6_BROADWELL_XEON_D:
4547 case INTEL_FAM6_BROADWELL_GT3E:
4548 case INTEL_FAM6_BROADWELL_X:
4549 x86_pmu.late_ack = true;
4550 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4551 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4553 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
4554 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
4555 BDW_L3_MISS|HSW_SNOOP_DRAM;
4556 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
4557 HSW_SNOOP_DRAM;
4558 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
4559 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4560 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
4561 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4563 intel_pmu_lbr_init_hsw();
4565 x86_pmu.event_constraints = intel_bdw_event_constraints;
4566 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
4567 x86_pmu.extra_regs = intel_snbep_extra_regs;
4568 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4569 x86_pmu.pebs_prec_dist = true;
4570 /* all extra regs are per-cpu when HT is on */
4571 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4572 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4574 x86_pmu.hw_config = hsw_hw_config;
4575 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4576 x86_pmu.cpu_events = hsw_events_attrs;
4577 x86_pmu.limit_period = bdw_limit_period;
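/*
 * Note: bdw_limit_period() (defined earlier in this file) is used to clamp
 * the sampling period for INST_RETIRED.ALL, which on Broadwell reportedly
 * must not be too small and must have its low bits clear per the relevant
 * errata.
 */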
4578 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4579 hsw_format_attr : nhm_format_attr;
4580 mem_attr = hsw_mem_events_attrs;
4581 tsx_attr = hsw_tsx_events_attrs;
4582 pr_cont("Broadwell events, ");
4583 name = "broadwell";
4584 break;
4586 case INTEL_FAM6_XEON_PHI_KNL:
4587 case INTEL_FAM6_XEON_PHI_KNM:
4588 memcpy(hw_cache_event_ids,
4589 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4590 memcpy(hw_cache_extra_regs,
4591 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4592 intel_pmu_lbr_init_knl();
4594 x86_pmu.event_constraints = intel_slm_event_constraints;
4595 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4596 x86_pmu.extra_regs = intel_knl_extra_regs;
4598 /* all extra regs are per-cpu when HT is on */
4599 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4600 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4601 extra_attr = slm_format_attr;
4602 pr_cont("Knights Landing/Mill events, ");
4603 name = "knights-landing";
4604 break;
4606 case INTEL_FAM6_SKYLAKE_MOBILE:
4607 case INTEL_FAM6_SKYLAKE_DESKTOP:
4608 case INTEL_FAM6_SKYLAKE_X:
4609 case INTEL_FAM6_KABYLAKE_MOBILE:
4610 case INTEL_FAM6_KABYLAKE_DESKTOP:
4611 x86_pmu.late_ack = true;
4612 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4613 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4614 intel_pmu_lbr_init_skl();
4616 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
4617 event_attr_td_recovery_bubbles.event_str_noht =
4618 "event=0xd,umask=0x1,cmask=1";
4619 event_attr_td_recovery_bubbles.event_str_ht =
4620 "event=0xd,umask=0x1,cmask=1,any=1";
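/*
 * Note: the _ht variant adds any=1 (the AnyThread bit) so that recovery
 * bubbles are counted for both SMT siblings of the core; the appropriate
 * string is selected depending on whether HyperThreading is enabled.
 */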
4622 x86_pmu.event_constraints = intel_skl_event_constraints;
4623 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
4624 x86_pmu.extra_regs = intel_skl_extra_regs;
4625 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
4626 x86_pmu.pebs_prec_dist = true;
4627 /* all extra regs are per-cpu when HT is on */
4628 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4629 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4631 x86_pmu.hw_config = hsw_hw_config;
4632 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4633 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4634 hsw_format_attr : nhm_format_attr;
4635 extra_attr = merge_attr(extra_attr, skl_format_attr);
4636 to_free = extra_attr;
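/*
 * Note: merge_attr() allocates a new, combined attribute array; the result
 * is remembered in to_free so it can be kfree()d at the end of
 * intel_pmu_init().
 */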
4637 x86_pmu.cpu_events = hsw_events_attrs;
4638 mem_attr = hsw_mem_events_attrs;
4639 tsx_attr = hsw_tsx_events_attrs;
4640 intel_pmu_pebs_data_source_skl(
4641 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
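/*
 * Note: TSX Force Abort (TFA) erratum handling.  On parts advertising
 * X86_FEATURE_TSX_FORCE_ABORT, general-purpose counter 3 can only be used
 * if TSX transactions are forced to abort, so the constraint/scheduling
 * hooks are switched to the tfa_* variants and the allow_tsx_force_abort
 * sysfs attribute is exposed to control the trade-off.
 */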
4643 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4644 x86_pmu.flags |= PMU_FL_TFA;
4645 x86_pmu.get_event_constraints = tfa_get_event_constraints;
4646 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4647 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4648 intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
4649 }
4651 pr_cont("Skylake events, ");
4652 name = "skylake";
4653 break;
4655 default:
4656 switch (x86_pmu.version) {
4657 case 1:
4658 x86_pmu.event_constraints = intel_v1_event_constraints;
4659 pr_cont("generic architected perfmon v1, ");
4660 name = "generic_arch_v1";
4661 break;
4662 default:
4663 /*
4664 * default constraints for v2 and up
4665 */
4666 x86_pmu.event_constraints = intel_gen_event_constraints;
4667 pr_cont("generic architected perfmon, ");
4668 name = "generic_arch_v2+";
4669 break;
4670 }
4671 break;
4672 }
4673 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4675 if (version >= 2 && extra_attr) {
4676 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4677 extra_attr);
4678 WARN_ON(!x86_pmu.format_attrs);
4679 }
4681 x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
4682 mem_attr, tsx_attr);
4684 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
4685 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4686 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
4687 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
4688 }
4689 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
4691 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
4692 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4693 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
4694 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
4695 }
4697 x86_pmu.intel_ctrl |=
4698 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
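/*
 * Note: x86_pmu.intel_ctrl now carries one enable bit per usable
 * general-purpose counter plus one per fixed counter (shifted to
 * INTEL_PMC_IDX_FIXED); it serves as the mask of valid enable bits for
 * MSR_CORE_PERF_GLOBAL_CTRL when the PMU is (re)enabled.
 */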
4700 if (x86_pmu.event_constraints) {
4701 /*
4702 * event on fixed counter2 (REF_CYCLES) only works on this
4703 * counter, so do not extend mask to generic counters
4704 */
4705 for_each_event_constraint(c, x86_pmu.event_constraints) {
4706 if (c->cmask == FIXED_EVENT_FLAGS
4707 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
4708 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
4709 }
4710 c->idxmsk64 &=
4711 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
4712 c->weight = hweight64(c->idxmsk64);
4713 }
4714 }
4716 /*
4717 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
4718 * e.g. KVM doesn't support the LBR MSRs.
4719 * Check all LBR MSRs here.
4720 * Disable LBR access if any LBR MSR cannot be accessed.
4721 */
4722 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
4723 x86_pmu.lbr_nr = 0;
4724 for (i = 0; i < x86_pmu.lbr_nr; i++) {
4725 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
4726 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
4727 x86_pmu.lbr_nr = 0;
4728 }
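/*
 * Note: check_msr() (defined earlier in this file) probes an MSR by
 * XOR-ing the supplied mask into its current value, writing that back,
 * re-reading it and then restoring the original contents; if the write
 * faults or the readback does not match, the MSR is treated as unusable,
 * as is typical under a hypervisor that does not emulate it.
 */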
4730 x86_pmu.caps_attrs = intel_pmu_caps_attrs;
4732 if (x86_pmu.lbr_nr) {
4733 x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
4734 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
4735 }
4737 /*
4738 * Accessing the extra-register MSRs may cause a #GP under certain
4739 * circumstances, e.g. KVM doesn't support the offcore event MSRs.
4740 * Check all extra_regs here.
4741 */
4742 if (x86_pmu.extra_regs) {
4743 for (er = x86_pmu.extra_regs; er->msr; er++) {
4744 er->extra_msr_access = check_msr(er->msr, 0x11UL);
4745 /* Disable LBR select mapping */
4746 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
4747 x86_pmu.lbr_sel_map = NULL;
4748 }
4749 }
4751 /* Support full width counters using alternative MSR range */
4752 if (x86_pmu.intel_cap.full_width_write) {
4753 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
4754 x86_pmu.perfctr = MSR_IA32_PMC0;
4755 pr_cont("full-width counters, ");
4756 }
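/*
 * Note: the legacy perfctr MSRs only accept a 32-bit value that is
 * sign-extended into the counter, which normally limits the usable period;
 * when full_width_write is advertised, the MSR_IA32_PMC0 alias range
 * accepts writes of the counter's full width, so max_period can be raised
 * to half the counter value mask.
 */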
4758 /*
4759 * For arch perfmon version 4, use counter freezing to avoid
4760 * several MSR accesses in the PMI handler.
4761 */
4762 if (x86_pmu.counter_freezing)
4763 x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
4765 kfree(to_free);
4766 return 0;
4767 }
4769 /*
4770 * HT bug: phase 2 init
4771 * Called once we have valid topology information to check
4772 * whether or not HT is enabled
4773 * If HT is off, then we disable the workaround
4774 */
4775 static __init int fixup_ht_bug(void)
4776 {
4777 int c;
4778 /*
4779 * problem not present on this CPU model, nothing to do
4780 */
4781 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
4782 return 0;
4784 if (topology_max_smt_threads() > 1) {
4785 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
4786 return 0;
4787 }
4789 cpus_read_lock();
4791 hardlockup_detector_perf_stop();
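/*
 * Note: the perf-based NMI watchdog is stopped here, presumably so that no
 * perf events are scheduled while the exclusive-counter scheduling hooks
 * and per-CPU state below are torn down; it is restarted once the flags
 * have been cleared.
 */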
4793 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
4795 x86_pmu.start_scheduling = NULL;
4796 x86_pmu.commit_scheduling = NULL;
4797 x86_pmu.stop_scheduling = NULL;
4799 hardlockup_detector_perf_restart();
4801 for_each_online_cpu(c)
4802 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
4804 cpus_read_unlock();
4805 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
4806 return 0;
4807 }
4808 subsys_initcall(fixup_ht_bug);