1 // SPDX-License-Identifier: GPL-2.0-only
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
18 #include <asm/cpufeature.h>
19 #include <asm/hardirq.h>
20 #include <asm/intel-family.h>
21 #include <asm/intel_pt.h>
23 #include <asm/cpu_device_id.h>
25 #include "../perf_event.h"
28 * Intel PerfMon, used on Core and later.
30 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
32 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
33 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
34 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
35 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
36 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
37 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
38 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
39 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
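/*
 * Note on the encoding used in the table above: each entry packs the
 * architectural PERFEVTSEL fields, with the event select in bits 0-7 and
 * the unit mask in bits 8-15. For example 0x412e is event 0x2e with
 * umask 0x41 (LLC misses) and 0x00c0 is event 0xc0 with umask 0x00
 * (instructions retired).
 */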
42 static struct event_constraint intel_core_event_constraints[] __read_mostly =
44 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
45 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
46 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
47 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
48 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
49 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
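/*
 * In these constraint tables, FIXED_EVENT_CONSTRAINT() takes the index of
 * the fixed counter the event maps to, while the second argument of the
 * INTEL_*EVENT_CONSTRAINT() macros is a bitmask of the general-purpose
 * counters the event may be scheduled on: 0x1 = counter 0 only, 0x2 =
 * counter 1 only, 0x3 = counters 0-1, 0xf = any of counters 0-3.
 */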
53 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
55 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
56 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
57 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
58 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
59 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
60 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
61 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
62 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
63 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
64 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
65 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
66 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
67 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
71 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
73 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
74 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
75 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
76 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
77 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
78 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
79 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
80 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
81 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
82 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
83 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
87 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
89 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
90 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
91 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
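/*
 * An extra_reg entry ties an event encoding (0x01b7 is OFFCORE_RESPONSE_0,
 * i.e. event 0xb7 with umask 0x01) to the auxiliary MSR that has to be
 * programmed along with it. The MSR value is taken from the event's
 * attr.config1 and validated against the valid-bits mask given here
 * (0xffff on Nehalem).
 */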
95 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
97 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
98 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
99 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
100 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
101 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
102 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
103 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
107 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
109 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
110 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
111 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
112 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
113 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
114 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
116 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
117 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
118 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
119 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
120 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
123 * When HT is off these events can only run on the bottom 4 counters
124 * When HT is on, they are impacted by the HT bug and require EXCL access
126 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
127 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
128 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
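/*
 * INTEL_EXCLEVT_CONSTRAINT() marks the events hit by the HT bug mentioned
 * above: with HT on they need exclusive access to their counter, and the
 * constraint scheduler coordinates the two hyper-threads so that the
 * sibling's counter with the same index is not used in a way that would
 * corrupt the count (roughly speaking; the exact rules live in the
 * exclusive-mode scheduling code).
 */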
134 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
136 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
137 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
138 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
139 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
140 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
141 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
142 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
144 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
151 * When HT is off these events can only run on the bottom 4 counters
152 * When HT is on, they are impacted by the HT bug and require EXCL access
154 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
155 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
156 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
162 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
164 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
165 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
166 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
167 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
171 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
176 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
178 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
179 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
180 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
184 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
186 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
187 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
188 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
192 static struct event_constraint intel_skl_event_constraints[] = {
193 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
194 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
195 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
196 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
199 * when HT is off, these can only run on the bottom 4 counters
201 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
203 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
204 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
205 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
210 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
211 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
212 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
216 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
217 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
218 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
219 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
220 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
224 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
225 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
226 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
227 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
228 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
232 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
234 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
235 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
237 * Note that the low 8 bits of the eventsel code are not a contiguous
238 * field; they contain some bits which #GP when set. These are masked out.
240 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
244 static struct event_constraint intel_icl_event_constraints[] = {
245 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
246 INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
247 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
248 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
249 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
250 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
251 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
252 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
253 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
254 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
255 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
256 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
257 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
258 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
259 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
260 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
261 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
262 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
263 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
264 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
265 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
269 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
270 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
271 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
272 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
273 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
277 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
278 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
279 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
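/*
 * ldlat=3 programs a minimal load-latency threshold for the PEBS
 * load-latency facility (the value is passed through attr.config1), so
 * essentially every retired load qualifies for sampling.
 */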
281 static struct attribute *nhm_mem_events_attrs[] = {
282 EVENT_PTR(mem_ld_nhm),
287 * topdown events for Intel Core CPUs.
289 * The events are all measured in slots, where a slot is a free issue slot
290 * in a 4-wide pipeline. Some events are already reported in slots; for
291 * cycle events we multiply by the pipeline width (4).
293 * With Hyper Threading on, topdown metrics are either summed or averaged
294 * between the threads of a core: (count_t0 + count_t1).
296 * For the average case the metric is always scaled to pipeline width,
297 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
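 *
 * For example (illustrative numbers): if thread 0 counts 100 cycles and
 * thread 1 counts 60, the per-core slot estimate is
 * (100 + 60) / 2 * 4 = 320, which is the same as (100 + 60) * 2 - hence
 * the .scale of "2" below with HT on, and "4" with HT off.
 */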
300 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
301 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
302 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
303 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
304 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
305 "event=0xe,umask=0x1"); /* uops_issued.any */
306 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
307 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
308 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
309 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
310 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
311 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
312 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
313 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
316 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
317 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
318 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
319 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
320 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
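/*
 * Illustrative userspace sketch (an assumption for exposition, not part of
 * this driver): the fixed-metrics events above can be requested through
 * their raw encodings, e.g. the SLOTS pseudo-event (event=0x00, umask=0x4).
 */
#if 0
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_slots_counter(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = 0x0400,	/* event=0x00, umask=0x4: SLOTS */
	};

	/* measure the calling thread on any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
#endif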
322 static struct attribute *snb_events_attrs[] = {
323 EVENT_PTR(td_slots_issued),
324 EVENT_PTR(td_slots_retired),
325 EVENT_PTR(td_fetch_bubbles),
326 EVENT_PTR(td_total_slots),
327 EVENT_PTR(td_total_slots_scale),
328 EVENT_PTR(td_recovery_bubbles),
329 EVENT_PTR(td_recovery_bubbles_scale),
333 static struct attribute *snb_mem_events_attrs[] = {
334 EVENT_PTR(mem_ld_snb),
335 EVENT_PTR(mem_st_snb),
339 static struct event_constraint intel_hsw_event_constraints[] = {
340 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
341 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
342 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
343 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
344 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
345 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
346 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
347 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
348 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
349 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
350 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
351 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
354 * When HT is off these events can only run on the bottom 4 counters
355 * When HT is on, they are impacted by the HT bug and require EXCL access
357 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
358 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
359 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
360 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
365 static struct event_constraint intel_bdw_event_constraints[] = {
366 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
367 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
368 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
369 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
370 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
372 * when HT is off, these can only run on the bottom 4 counters
374 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
375 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
376 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
377 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
381 static u64 intel_pmu_event_map(int hw_event)
383 return intel_perfmon_event_map[hw_event];
387 * Notes on the events:
388 * - data reads do not include code reads (comparable to earlier tables)
389 * - data counts include speculative execution (except L1 write, dtlb, bpu)
390 * - remote node access includes remote memory, remote cache, remote mmio.
391 * - prefetches are not included in the counts.
392 * - icache miss does not include decoded icache
395 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
396 #define SKL_DEMAND_RFO BIT_ULL(1)
397 #define SKL_ANY_RESPONSE BIT_ULL(16)
398 #define SKL_SUPPLIER_NONE BIT_ULL(17)
399 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
400 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
401 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
402 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
403 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
404 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
405 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
406 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
407 #define SKL_SPL_HIT BIT_ULL(30)
408 #define SKL_SNOOP_NONE BIT_ULL(31)
409 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
410 #define SKL_SNOOP_MISS BIT_ULL(33)
411 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
412 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
413 #define SKL_SNOOP_HITM BIT_ULL(36)
414 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
415 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
416 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
417 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
418 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
419 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
420 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
421 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
422 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
423 SKL_SNOOP_HITM|SKL_SPL_HIT)
424 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
425 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
426 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
427 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
428 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
430 static __initconst const u64 skl_hw_cache_event_ids
431 [PERF_COUNT_HW_CACHE_MAX]
432 [PERF_COUNT_HW_CACHE_OP_MAX]
433 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
437 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
438 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
441 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
442 [ C(RESULT_MISS) ] = 0x0,
444 [ C(OP_PREFETCH) ] = {
445 [ C(RESULT_ACCESS) ] = 0x0,
446 [ C(RESULT_MISS) ] = 0x0,
451 [ C(RESULT_ACCESS) ] = 0x0,
452 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
455 [ C(RESULT_ACCESS) ] = -1,
456 [ C(RESULT_MISS) ] = -1,
458 [ C(OP_PREFETCH) ] = {
459 [ C(RESULT_ACCESS) ] = 0x0,
460 [ C(RESULT_MISS) ] = 0x0,
465 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
466 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
469 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
470 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
472 [ C(OP_PREFETCH) ] = {
473 [ C(RESULT_ACCESS) ] = 0x0,
474 [ C(RESULT_MISS) ] = 0x0,
479 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
480 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
483 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
484 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
486 [ C(OP_PREFETCH) ] = {
487 [ C(RESULT_ACCESS) ] = 0x0,
488 [ C(RESULT_MISS) ] = 0x0,
493 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
494 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
497 [ C(RESULT_ACCESS) ] = -1,
498 [ C(RESULT_MISS) ] = -1,
500 [ C(OP_PREFETCH) ] = {
501 [ C(RESULT_ACCESS) ] = -1,
502 [ C(RESULT_MISS) ] = -1,
507 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
508 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
511 [ C(RESULT_ACCESS) ] = -1,
512 [ C(RESULT_MISS) ] = -1,
514 [ C(OP_PREFETCH) ] = {
515 [ C(RESULT_ACCESS) ] = -1,
516 [ C(RESULT_MISS) ] = -1,
521 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
522 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
525 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
526 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
528 [ C(OP_PREFETCH) ] = {
529 [ C(RESULT_ACCESS) ] = 0x0,
530 [ C(RESULT_MISS) ] = 0x0,
535 static __initconst const u64 skl_hw_cache_extra_regs
536 [PERF_COUNT_HW_CACHE_MAX]
537 [PERF_COUNT_HW_CACHE_OP_MAX]
538 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
542 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
543 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
544 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
545 SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE,
549 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
550 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
551 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
552 SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE,
555 [ C(OP_PREFETCH) ] = {
556 [ C(RESULT_ACCESS) ] = 0x0,
557 [ C(RESULT_MISS) ] = 0x0,
562 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
563 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
564 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
565 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
568 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
569 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
570 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
571 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
573 [ C(OP_PREFETCH) ] = {
574 [ C(RESULT_ACCESS) ] = 0x0,
575 [ C(RESULT_MISS) ] = 0x0,
580 #define SNB_DMND_DATA_RD (1ULL << 0)
581 #define SNB_DMND_RFO (1ULL << 1)
582 #define SNB_DMND_IFETCH (1ULL << 2)
583 #define SNB_DMND_WB (1ULL << 3)
584 #define SNB_PF_DATA_RD (1ULL << 4)
585 #define SNB_PF_RFO (1ULL << 5)
586 #define SNB_PF_IFETCH (1ULL << 6)
587 #define SNB_LLC_DATA_RD (1ULL << 7)
588 #define SNB_LLC_RFO (1ULL << 8)
589 #define SNB_LLC_IFETCH (1ULL << 9)
590 #define SNB_BUS_LOCKS (1ULL << 10)
591 #define SNB_STRM_ST (1ULL << 11)
592 #define SNB_OTHER (1ULL << 15)
593 #define SNB_RESP_ANY (1ULL << 16)
594 #define SNB_NO_SUPP (1ULL << 17)
595 #define SNB_LLC_HITM (1ULL << 18)
596 #define SNB_LLC_HITE (1ULL << 19)
597 #define SNB_LLC_HITS (1ULL << 20)
598 #define SNB_LLC_HITF (1ULL << 21)
599 #define SNB_LOCAL (1ULL << 22)
600 #define SNB_REMOTE (0xffULL << 23)
601 #define SNB_SNP_NONE (1ULL << 31)
602 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
603 #define SNB_SNP_MISS (1ULL << 33)
604 #define SNB_NO_FWD (1ULL << 34)
605 #define SNB_SNP_FWD (1ULL << 35)
606 #define SNB_HITM (1ULL << 36)
607 #define SNB_NON_DRAM (1ULL << 37)
609 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
610 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
611 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
613 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
614 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
617 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
618 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
620 #define SNB_L3_ACCESS SNB_RESP_ANY
621 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
623 static __initconst const u64 snb_hw_cache_extra_regs
624 [PERF_COUNT_HW_CACHE_MAX]
625 [PERF_COUNT_HW_CACHE_OP_MAX]
626 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
630 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
631 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
634 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
635 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
637 [ C(OP_PREFETCH) ] = {
638 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
639 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
644 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
645 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
648 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
649 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
651 [ C(OP_PREFETCH) ] = {
652 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
653 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
658 static __initconst const u64 snb_hw_cache_event_ids
659 [PERF_COUNT_HW_CACHE_MAX]
660 [PERF_COUNT_HW_CACHE_OP_MAX]
661 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
665 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
666 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
669 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
670 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
672 [ C(OP_PREFETCH) ] = {
673 [ C(RESULT_ACCESS) ] = 0x0,
674 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
679 [ C(RESULT_ACCESS) ] = 0x0,
680 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
683 [ C(RESULT_ACCESS) ] = -1,
684 [ C(RESULT_MISS) ] = -1,
686 [ C(OP_PREFETCH) ] = {
687 [ C(RESULT_ACCESS) ] = 0x0,
688 [ C(RESULT_MISS) ] = 0x0,
693 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
694 [ C(RESULT_ACCESS) ] = 0x01b7,
695 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
696 [ C(RESULT_MISS) ] = 0x01b7,
699 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
700 [ C(RESULT_ACCESS) ] = 0x01b7,
701 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
702 [ C(RESULT_MISS) ] = 0x01b7,
704 [ C(OP_PREFETCH) ] = {
705 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
706 [ C(RESULT_ACCESS) ] = 0x01b7,
707 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
708 [ C(RESULT_MISS) ] = 0x01b7,
713 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
714 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
717 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
718 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
720 [ C(OP_PREFETCH) ] = {
721 [ C(RESULT_ACCESS) ] = 0x0,
722 [ C(RESULT_MISS) ] = 0x0,
727 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
728 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
731 [ C(RESULT_ACCESS) ] = -1,
732 [ C(RESULT_MISS) ] = -1,
734 [ C(OP_PREFETCH) ] = {
735 [ C(RESULT_ACCESS) ] = -1,
736 [ C(RESULT_MISS) ] = -1,
741 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
742 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
745 [ C(RESULT_ACCESS) ] = -1,
746 [ C(RESULT_MISS) ] = -1,
748 [ C(OP_PREFETCH) ] = {
749 [ C(RESULT_ACCESS) ] = -1,
750 [ C(RESULT_MISS) ] = -1,
755 [ C(RESULT_ACCESS) ] = 0x01b7,
756 [ C(RESULT_MISS) ] = 0x01b7,
759 [ C(RESULT_ACCESS) ] = 0x01b7,
760 [ C(RESULT_MISS) ] = 0x01b7,
762 [ C(OP_PREFETCH) ] = {
763 [ C(RESULT_ACCESS) ] = 0x01b7,
764 [ C(RESULT_MISS) ] = 0x01b7,
771 * Notes on the events:
772 * - data reads do not include code reads (comparable to earlier tables)
773 * - data counts include speculative execution (except L1 write, dtlb, bpu)
774 * - remote node access includes remote memory, remote cache, remote mmio.
775 * - prefetches are not included in the counts because they are not reliably counted.
779 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
780 #define HSW_DEMAND_RFO BIT_ULL(1)
781 #define HSW_ANY_RESPONSE BIT_ULL(16)
782 #define HSW_SUPPLIER_NONE BIT_ULL(17)
783 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
784 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
785 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
786 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
787 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
788 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
789 HSW_L3_MISS_REMOTE_HOP2P)
790 #define HSW_SNOOP_NONE BIT_ULL(31)
791 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
792 #define HSW_SNOOP_MISS BIT_ULL(33)
793 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
794 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
795 #define HSW_SNOOP_HITM BIT_ULL(36)
796 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
797 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
798 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
799 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
800 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
801 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
802 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
803 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
804 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
805 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
806 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
808 #define BDW_L3_MISS_LOCAL BIT(26)
809 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
810 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
811 HSW_L3_MISS_REMOTE_HOP2P)
814 static __initconst const u64 hsw_hw_cache_event_ids
815 [PERF_COUNT_HW_CACHE_MAX]
816 [PERF_COUNT_HW_CACHE_OP_MAX]
817 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
821 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
822 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
825 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
826 [ C(RESULT_MISS) ] = 0x0,
828 [ C(OP_PREFETCH) ] = {
829 [ C(RESULT_ACCESS) ] = 0x0,
830 [ C(RESULT_MISS) ] = 0x0,
835 [ C(RESULT_ACCESS) ] = 0x0,
836 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
839 [ C(RESULT_ACCESS) ] = -1,
840 [ C(RESULT_MISS) ] = -1,
842 [ C(OP_PREFETCH) ] = {
843 [ C(RESULT_ACCESS) ] = 0x0,
844 [ C(RESULT_MISS) ] = 0x0,
849 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
850 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
853 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
854 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
856 [ C(OP_PREFETCH) ] = {
857 [ C(RESULT_ACCESS) ] = 0x0,
858 [ C(RESULT_MISS) ] = 0x0,
863 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
864 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
867 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
868 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
870 [ C(OP_PREFETCH) ] = {
871 [ C(RESULT_ACCESS) ] = 0x0,
872 [ C(RESULT_MISS) ] = 0x0,
877 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
878 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
881 [ C(RESULT_ACCESS) ] = -1,
882 [ C(RESULT_MISS) ] = -1,
884 [ C(OP_PREFETCH) ] = {
885 [ C(RESULT_ACCESS) ] = -1,
886 [ C(RESULT_MISS) ] = -1,
891 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
892 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
895 [ C(RESULT_ACCESS) ] = -1,
896 [ C(RESULT_MISS) ] = -1,
898 [ C(OP_PREFETCH) ] = {
899 [ C(RESULT_ACCESS) ] = -1,
900 [ C(RESULT_MISS) ] = -1,
905 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
906 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
909 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
910 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
912 [ C(OP_PREFETCH) ] = {
913 [ C(RESULT_ACCESS) ] = 0x0,
914 [ C(RESULT_MISS) ] = 0x0,
919 static __initconst const u64 hsw_hw_cache_extra_regs
920 [PERF_COUNT_HW_CACHE_MAX]
921 [PERF_COUNT_HW_CACHE_OP_MAX]
922 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
926 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|HSW_LLC_ACCESS,
928 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
929 HSW_L3_MISS|HSW_ANY_SNOOP,
932 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|HSW_LLC_ACCESS,
934 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
935 HSW_L3_MISS|HSW_ANY_SNOOP,
937 [ C(OP_PREFETCH) ] = {
938 [ C(RESULT_ACCESS) ] = 0x0,
939 [ C(RESULT_MISS) ] = 0x0,
944 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
945 HSW_L3_MISS_LOCAL_DRAM|HSW_SNOOP_DRAM,
947 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|HSW_L3_MISS_REMOTE|HSW_SNOOP_DRAM,
952 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
953 HSW_L3_MISS_LOCAL_DRAM|HSW_SNOOP_DRAM,
955 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|HSW_L3_MISS_REMOTE|HSW_SNOOP_DRAM,
959 [ C(OP_PREFETCH) ] = {
960 [ C(RESULT_ACCESS) ] = 0x0,
961 [ C(RESULT_MISS) ] = 0x0,
966 static __initconst const u64 westmere_hw_cache_event_ids
967 [PERF_COUNT_HW_CACHE_MAX]
968 [PERF_COUNT_HW_CACHE_OP_MAX]
969 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
973 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
974 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
977 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
978 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
980 [ C(OP_PREFETCH) ] = {
981 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
982 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
987 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
988 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
991 [ C(RESULT_ACCESS) ] = -1,
992 [ C(RESULT_MISS) ] = -1,
994 [ C(OP_PREFETCH) ] = {
995 [ C(RESULT_ACCESS) ] = 0x0,
996 [ C(RESULT_MISS) ] = 0x0,
1001 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1002 [ C(RESULT_ACCESS) ] = 0x01b7,
1003 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1004 [ C(RESULT_MISS) ] = 0x01b7,
1007 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1011 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1012 [ C(RESULT_ACCESS) ] = 0x01b7,
1013 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1014 [ C(RESULT_MISS) ] = 0x01b7,
1016 [ C(OP_PREFETCH) ] = {
1017 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1018 [ C(RESULT_ACCESS) ] = 0x01b7,
1019 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1020 [ C(RESULT_MISS) ] = 0x01b7,
1025 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1026 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1029 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1030 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1032 [ C(OP_PREFETCH) ] = {
1033 [ C(RESULT_ACCESS) ] = 0x0,
1034 [ C(RESULT_MISS) ] = 0x0,
1039 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1040 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1043 [ C(RESULT_ACCESS) ] = -1,
1044 [ C(RESULT_MISS) ] = -1,
1046 [ C(OP_PREFETCH) ] = {
1047 [ C(RESULT_ACCESS) ] = -1,
1048 [ C(RESULT_MISS) ] = -1,
1053 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1054 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1057 [ C(RESULT_ACCESS) ] = -1,
1058 [ C(RESULT_MISS) ] = -1,
1060 [ C(OP_PREFETCH) ] = {
1061 [ C(RESULT_ACCESS) ] = -1,
1062 [ C(RESULT_MISS) ] = -1,
1067 [ C(RESULT_ACCESS) ] = 0x01b7,
1068 [ C(RESULT_MISS) ] = 0x01b7,
1071 [ C(RESULT_ACCESS) ] = 0x01b7,
1072 [ C(RESULT_MISS) ] = 0x01b7,
1074 [ C(OP_PREFETCH) ] = {
1075 [ C(RESULT_ACCESS) ] = 0x01b7,
1076 [ C(RESULT_MISS) ] = 0x01b7,
1082 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1083 * See IA32 SDM Vol 3B 30.6.1.3
1086 #define NHM_DMND_DATA_RD (1 << 0)
1087 #define NHM_DMND_RFO (1 << 1)
1088 #define NHM_DMND_IFETCH (1 << 2)
1089 #define NHM_DMND_WB (1 << 3)
1090 #define NHM_PF_DATA_RD (1 << 4)
1091 #define NHM_PF_DATA_RFO (1 << 5)
1092 #define NHM_PF_IFETCH (1 << 6)
1093 #define NHM_OFFCORE_OTHER (1 << 7)
1094 #define NHM_UNCORE_HIT (1 << 8)
1095 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1096 #define NHM_OTHER_CORE_HITM (1 << 10)
1098 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1099 #define NHM_REMOTE_DRAM (1 << 13)
1100 #define NHM_LOCAL_DRAM (1 << 14)
1101 #define NHM_NON_DRAM (1 << 15)
1103 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1104 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1106 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1107 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1108 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1110 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1111 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1112 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
1114 static __initconst const u64 nehalem_hw_cache_extra_regs
1115 [PERF_COUNT_HW_CACHE_MAX]
1116 [PERF_COUNT_HW_CACHE_OP_MAX]
1117 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1121 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1122 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1125 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1126 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1128 [ C(OP_PREFETCH) ] = {
1129 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1130 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1135 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1136 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1139 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1140 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1142 [ C(OP_PREFETCH) ] = {
1143 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1144 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1149 static __initconst const u64 nehalem_hw_cache_event_ids
1150 [PERF_COUNT_HW_CACHE_MAX]
1151 [PERF_COUNT_HW_CACHE_OP_MAX]
1152 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1156 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1157 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1160 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1161 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1163 [ C(OP_PREFETCH) ] = {
1164 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1165 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1170 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1171 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1174 [ C(RESULT_ACCESS) ] = -1,
1175 [ C(RESULT_MISS) ] = -1,
1177 [ C(OP_PREFETCH) ] = {
1178 [ C(RESULT_ACCESS) ] = 0x0,
1179 [ C(RESULT_MISS) ] = 0x0,
1184 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1185 [ C(RESULT_ACCESS) ] = 0x01b7,
1186 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1187 [ C(RESULT_MISS) ] = 0x01b7,
1190 * Use RFO, not WRITEBACK, because a write miss would typically occur on RFO.
1194 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1195 [ C(RESULT_ACCESS) ] = 0x01b7,
1196 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1197 [ C(RESULT_MISS) ] = 0x01b7,
1199 [ C(OP_PREFETCH) ] = {
1200 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1201 [ C(RESULT_ACCESS) ] = 0x01b7,
1202 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1203 [ C(RESULT_MISS) ] = 0x01b7,
1208 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1209 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1212 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1213 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1215 [ C(OP_PREFETCH) ] = {
1216 [ C(RESULT_ACCESS) ] = 0x0,
1217 [ C(RESULT_MISS) ] = 0x0,
1222 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1223 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1226 [ C(RESULT_ACCESS) ] = -1,
1227 [ C(RESULT_MISS) ] = -1,
1229 [ C(OP_PREFETCH) ] = {
1230 [ C(RESULT_ACCESS) ] = -1,
1231 [ C(RESULT_MISS) ] = -1,
1236 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1237 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1240 [ C(RESULT_ACCESS) ] = -1,
1241 [ C(RESULT_MISS) ] = -1,
1243 [ C(OP_PREFETCH) ] = {
1244 [ C(RESULT_ACCESS) ] = -1,
1245 [ C(RESULT_MISS) ] = -1,
1250 [ C(RESULT_ACCESS) ] = 0x01b7,
1251 [ C(RESULT_MISS) ] = 0x01b7,
1254 [ C(RESULT_ACCESS) ] = 0x01b7,
1255 [ C(RESULT_MISS) ] = 0x01b7,
1257 [ C(OP_PREFETCH) ] = {
1258 [ C(RESULT_ACCESS) ] = 0x01b7,
1259 [ C(RESULT_MISS) ] = 0x01b7,
1264 static __initconst const u64 core2_hw_cache_event_ids
1265 [PERF_COUNT_HW_CACHE_MAX]
1266 [PERF_COUNT_HW_CACHE_OP_MAX]
1267 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1271 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1272 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1275 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1276 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1278 [ C(OP_PREFETCH) ] = {
1279 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1280 [ C(RESULT_MISS) ] = 0,
1285 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1286 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1289 [ C(RESULT_ACCESS) ] = -1,
1290 [ C(RESULT_MISS) ] = -1,
1292 [ C(OP_PREFETCH) ] = {
1293 [ C(RESULT_ACCESS) ] = 0,
1294 [ C(RESULT_MISS) ] = 0,
1299 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1300 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1303 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1304 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1306 [ C(OP_PREFETCH) ] = {
1307 [ C(RESULT_ACCESS) ] = 0,
1308 [ C(RESULT_MISS) ] = 0,
1313 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1314 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1317 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1318 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1320 [ C(OP_PREFETCH) ] = {
1321 [ C(RESULT_ACCESS) ] = 0,
1322 [ C(RESULT_MISS) ] = 0,
1327 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1328 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1331 [ C(RESULT_ACCESS) ] = -1,
1332 [ C(RESULT_MISS) ] = -1,
1334 [ C(OP_PREFETCH) ] = {
1335 [ C(RESULT_ACCESS) ] = -1,
1336 [ C(RESULT_MISS) ] = -1,
1341 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1342 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1345 [ C(RESULT_ACCESS) ] = -1,
1346 [ C(RESULT_MISS) ] = -1,
1348 [ C(OP_PREFETCH) ] = {
1349 [ C(RESULT_ACCESS) ] = -1,
1350 [ C(RESULT_MISS) ] = -1,
1355 static __initconst const u64 atom_hw_cache_event_ids
1356 [PERF_COUNT_HW_CACHE_MAX]
1357 [PERF_COUNT_HW_CACHE_OP_MAX]
1358 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1362 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1363 [ C(RESULT_MISS) ] = 0,
1366 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1367 [ C(RESULT_MISS) ] = 0,
1369 [ C(OP_PREFETCH) ] = {
1370 [ C(RESULT_ACCESS) ] = 0x0,
1371 [ C(RESULT_MISS) ] = 0,
1376 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1377 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1380 [ C(RESULT_ACCESS) ] = -1,
1381 [ C(RESULT_MISS) ] = -1,
1383 [ C(OP_PREFETCH) ] = {
1384 [ C(RESULT_ACCESS) ] = 0,
1385 [ C(RESULT_MISS) ] = 0,
1390 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1391 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1394 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1395 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1397 [ C(OP_PREFETCH) ] = {
1398 [ C(RESULT_ACCESS) ] = 0,
1399 [ C(RESULT_MISS) ] = 0,
1404 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1405 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1408 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1409 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1411 [ C(OP_PREFETCH) ] = {
1412 [ C(RESULT_ACCESS) ] = 0,
1413 [ C(RESULT_MISS) ] = 0,
1418 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1419 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1422 [ C(RESULT_ACCESS) ] = -1,
1423 [ C(RESULT_MISS) ] = -1,
1425 [ C(OP_PREFETCH) ] = {
1426 [ C(RESULT_ACCESS) ] = -1,
1427 [ C(RESULT_MISS) ] = -1,
1432 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1433 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1436 [ C(RESULT_ACCESS) ] = -1,
1437 [ C(RESULT_MISS) ] = -1,
1439 [ C(OP_PREFETCH) ] = {
1440 [ C(RESULT_ACCESS) ] = -1,
1441 [ C(RESULT_MISS) ] = -1,
1446 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1447 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1448 /* no_alloc_cycles.not_delivered */
1449 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1450 "event=0xca,umask=0x50");
1451 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1452 /* uops_retired.all */
1453 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1454 "event=0xc2,umask=0x10");
1455 /* uops_retired.all */
1456 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1457 "event=0xc2,umask=0x10");
1459 static struct attribute *slm_events_attrs[] = {
1460 EVENT_PTR(td_total_slots_slm),
1461 EVENT_PTR(td_total_slots_scale_slm),
1462 EVENT_PTR(td_fetch_bubbles_slm),
1463 EVENT_PTR(td_fetch_bubbles_scale_slm),
1464 EVENT_PTR(td_slots_issued_slm),
1465 EVENT_PTR(td_slots_retired_slm),
1469 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1471 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1472 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1473 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1477 #define SLM_DMND_READ SNB_DMND_DATA_RD
1478 #define SLM_DMND_WRITE SNB_DMND_RFO
1479 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1481 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1482 #define SLM_LLC_ACCESS SNB_RESP_ANY
1483 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1485 static __initconst const u64 slm_hw_cache_extra_regs
1486 [PERF_COUNT_HW_CACHE_MAX]
1487 [PERF_COUNT_HW_CACHE_OP_MAX]
1488 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1492 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1493 [ C(RESULT_MISS) ] = 0,
1496 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1497 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1499 [ C(OP_PREFETCH) ] = {
1500 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1501 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1506 static __initconst const u64 slm_hw_cache_event_ids
1507 [PERF_COUNT_HW_CACHE_MAX]
1508 [PERF_COUNT_HW_CACHE_OP_MAX]
1509 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1513 [ C(RESULT_ACCESS) ] = 0,
1514 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1517 [ C(RESULT_ACCESS) ] = 0,
1518 [ C(RESULT_MISS) ] = 0,
1520 [ C(OP_PREFETCH) ] = {
1521 [ C(RESULT_ACCESS) ] = 0,
1522 [ C(RESULT_MISS) ] = 0,
1527 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1528 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1531 [ C(RESULT_ACCESS) ] = -1,
1532 [ C(RESULT_MISS) ] = -1,
1534 [ C(OP_PREFETCH) ] = {
1535 [ C(RESULT_ACCESS) ] = 0,
1536 [ C(RESULT_MISS) ] = 0,
1541 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1542 [ C(RESULT_ACCESS) ] = 0x01b7,
1543 [ C(RESULT_MISS) ] = 0,
1546 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1547 [ C(RESULT_ACCESS) ] = 0x01b7,
1548 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1549 [ C(RESULT_MISS) ] = 0x01b7,
1551 [ C(OP_PREFETCH) ] = {
1552 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1553 [ C(RESULT_ACCESS) ] = 0x01b7,
1554 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1555 [ C(RESULT_MISS) ] = 0x01b7,
1560 [ C(RESULT_ACCESS) ] = 0,
1561 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1564 [ C(RESULT_ACCESS) ] = 0,
1565 [ C(RESULT_MISS) ] = 0,
1567 [ C(OP_PREFETCH) ] = {
1568 [ C(RESULT_ACCESS) ] = 0,
1569 [ C(RESULT_MISS) ] = 0,
1574 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1575 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1578 [ C(RESULT_ACCESS) ] = -1,
1579 [ C(RESULT_MISS) ] = -1,
1581 [ C(OP_PREFETCH) ] = {
1582 [ C(RESULT_ACCESS) ] = -1,
1583 [ C(RESULT_MISS) ] = -1,
1588 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1589 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1592 [ C(RESULT_ACCESS) ] = -1,
1593 [ C(RESULT_MISS) ] = -1,
1595 [ C(OP_PREFETCH) ] = {
1596 [ C(RESULT_ACCESS) ] = -1,
1597 [ C(RESULT_MISS) ] = -1,
1602 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1603 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1604 /* UOPS_NOT_DELIVERED.ANY */
1605 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1606 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1607 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1608 /* UOPS_RETIRED.ANY */
1609 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1610 /* UOPS_ISSUED.ANY */
1611 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1613 static struct attribute *glm_events_attrs[] = {
1614 EVENT_PTR(td_total_slots_glm),
1615 EVENT_PTR(td_total_slots_scale_glm),
1616 EVENT_PTR(td_fetch_bubbles_glm),
1617 EVENT_PTR(td_recovery_bubbles_glm),
1618 EVENT_PTR(td_slots_issued_glm),
1619 EVENT_PTR(td_slots_retired_glm),
1623 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1624 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1625 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1626 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1630 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1631 #define GLM_DEMAND_RFO BIT_ULL(1)
1632 #define GLM_ANY_RESPONSE BIT_ULL(16)
1633 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1634 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1635 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1636 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1637 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1638 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1639 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1641 static __initconst const u64 glm_hw_cache_event_ids
1642 [PERF_COUNT_HW_CACHE_MAX]
1643 [PERF_COUNT_HW_CACHE_OP_MAX]
1644 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1647 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1648 [C(RESULT_MISS)] = 0x0,
1651 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1652 [C(RESULT_MISS)] = 0x0,
1654 [C(OP_PREFETCH)] = {
1655 [C(RESULT_ACCESS)] = 0x0,
1656 [C(RESULT_MISS)] = 0x0,
1661 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1662 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1665 [C(RESULT_ACCESS)] = -1,
1666 [C(RESULT_MISS)] = -1,
1668 [C(OP_PREFETCH)] = {
1669 [C(RESULT_ACCESS)] = 0x0,
1670 [C(RESULT_MISS)] = 0x0,
1675 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1676 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1679 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1680 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1682 [C(OP_PREFETCH)] = {
1683 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1684 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1689 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1690 [C(RESULT_MISS)] = 0x0,
1693 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1694 [C(RESULT_MISS)] = 0x0,
1696 [C(OP_PREFETCH)] = {
1697 [C(RESULT_ACCESS)] = 0x0,
1698 [C(RESULT_MISS)] = 0x0,
1703 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1704 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1707 [C(RESULT_ACCESS)] = -1,
1708 [C(RESULT_MISS)] = -1,
1710 [C(OP_PREFETCH)] = {
1711 [C(RESULT_ACCESS)] = -1,
1712 [C(RESULT_MISS)] = -1,
1717 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1718 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1721 [C(RESULT_ACCESS)] = -1,
1722 [C(RESULT_MISS)] = -1,
1724 [C(OP_PREFETCH)] = {
1725 [C(RESULT_ACCESS)] = -1,
1726 [C(RESULT_MISS)] = -1,
1731 static __initconst const u64 glm_hw_cache_extra_regs
1732 [PERF_COUNT_HW_CACHE_MAX]
1733 [PERF_COUNT_HW_CACHE_OP_MAX]
1734 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1737 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|GLM_LLC_ACCESS,
1739 [C(RESULT_MISS)] = GLM_DEMAND_READ|GLM_LLC_MISS,
1743 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|GLM_LLC_ACCESS,
1745 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|GLM_LLC_MISS,
1748 [C(OP_PREFETCH)] = {
1749 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|GLM_LLC_ACCESS,
1751 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|GLM_LLC_MISS,
1757 static __initconst const u64 glp_hw_cache_event_ids
1758 [PERF_COUNT_HW_CACHE_MAX]
1759 [PERF_COUNT_HW_CACHE_OP_MAX]
1760 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1763 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1764 [C(RESULT_MISS)] = 0x0,
1767 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1768 [C(RESULT_MISS)] = 0x0,
1770 [C(OP_PREFETCH)] = {
1771 [C(RESULT_ACCESS)] = 0x0,
1772 [C(RESULT_MISS)] = 0x0,
1777 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1778 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1781 [C(RESULT_ACCESS)] = -1,
1782 [C(RESULT_MISS)] = -1,
1784 [C(OP_PREFETCH)] = {
1785 [C(RESULT_ACCESS)] = 0x0,
1786 [C(RESULT_MISS)] = 0x0,
1791 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1792 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1795 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1796 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1798 [C(OP_PREFETCH)] = {
1799 [C(RESULT_ACCESS)] = 0x0,
1800 [C(RESULT_MISS)] = 0x0,
1805 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1806 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1809 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1810 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1812 [C(OP_PREFETCH)] = {
1813 [C(RESULT_ACCESS)] = 0x0,
1814 [C(RESULT_MISS)] = 0x0,
1819 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1820 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1823 [C(RESULT_ACCESS)] = -1,
1824 [C(RESULT_MISS)] = -1,
1826 [C(OP_PREFETCH)] = {
1827 [C(RESULT_ACCESS)] = -1,
1828 [C(RESULT_MISS)] = -1,
1833 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1834 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1837 [C(RESULT_ACCESS)] = -1,
1838 [C(RESULT_MISS)] = -1,
1840 [C(OP_PREFETCH)] = {
1841 [C(RESULT_ACCESS)] = -1,
1842 [C(RESULT_MISS)] = -1,
1847 static __initconst const u64 glp_hw_cache_extra_regs
1848 [PERF_COUNT_HW_CACHE_MAX]
1849 [PERF_COUNT_HW_CACHE_OP_MAX]
1850 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1853 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|GLM_LLC_ACCESS,
1855 [C(RESULT_MISS)] = GLM_DEMAND_READ|GLM_LLC_MISS,
1859 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|GLM_LLC_ACCESS,
1861 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|GLM_LLC_MISS,
1864 [C(OP_PREFETCH)] = {
1865 [C(RESULT_ACCESS)] = 0x0,
1866 [C(RESULT_MISS)] = 0x0,
1871 #define TNT_LOCAL_DRAM BIT_ULL(26)
1872 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
1873 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
1874 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
1875 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
1876 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
1877 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
1879 static __initconst const u64 tnt_hw_cache_extra_regs
1880 [PERF_COUNT_HW_CACHE_MAX]
1881 [PERF_COUNT_HW_CACHE_OP_MAX]
1882 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1885 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|TNT_LLC_ACCESS,
1887 [C(RESULT_MISS)] = TNT_DEMAND_READ|TNT_LLC_MISS,
1891 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|TNT_LLC_ACCESS,
1893 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|TNT_LLC_MISS,
1896 [C(OP_PREFETCH)] = {
1897 [C(RESULT_ACCESS)] = 0x0,
1898 [C(RESULT_MISS)] = 0x0,
1903 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
1904 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1905 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
1906 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
1910 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
1911 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
1912 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
1913 #define KNL_MCDRAM_FAR BIT_ULL(22)
1914 #define KNL_DDR_LOCAL BIT_ULL(23)
1915 #define KNL_DDR_FAR BIT_ULL(24)
1916 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1917 KNL_DDR_LOCAL | KNL_DDR_FAR)
1918 #define KNL_L2_READ SLM_DMND_READ
1919 #define KNL_L2_WRITE SLM_DMND_WRITE
1920 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
1921 #define KNL_L2_ACCESS SLM_LLC_ACCESS
1922 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1923 KNL_DRAM_ANY | SNB_SNP_ANY | SNB_NON_DRAM)
1926 static __initconst const u64 knl_hw_cache_extra_regs
1927 [PERF_COUNT_HW_CACHE_MAX]
1928 [PERF_COUNT_HW_CACHE_OP_MAX]
1929 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1932 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1933 [C(RESULT_MISS)] = 0,
1936 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1937 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
1939 [C(OP_PREFETCH)] = {
1940 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1941 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
1947 * Used from PMIs where the LBRs are already disabled.
1949 * This function may be called repeatedly. It must leave the PMU in the
1950 * disabled state when called repeatedly.
1952 * On repeated calls the same disable value is written to the relevant
1953 * registers, so the PMU state remains unchanged.
1955 * intel_bts events don't coexist with intel PMU's BTS events because of
1956 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1957 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1959 * Avoid PEBS_ENABLE MSR access in PMIs.
1960 * GLOBAL_CTRL has already been disabled, so none of the counters count
1961 * anymore; it doesn't matter whether PEBS is enabled or not.
1962 * Usually the PEBS status is not changed in PMIs, so it's unnecessary to
1963 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
1964 * However, some cases may change the PEBS status, e.g. PMI throttling;
1965 * PEBS_ENABLE should be updated where the status changes.
1967 static void __intel_pmu_disable_all(void)
1969 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1971 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1973 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1974 intel_pmu_disable_bts();
1977 static void intel_pmu_disable_all(void)
1979 __intel_pmu_disable_all();
1980 intel_pmu_pebs_disable_all();
1981 intel_pmu_lbr_disable_all();
1984 static void __intel_pmu_enable_all(int added, bool pmi)
1986 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1988 intel_pmu_lbr_enable_all(pmi);
1989 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1990 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1992 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1993 struct perf_event *event =
1994 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1996 if (WARN_ON_ONCE(!event))
return;
1999 intel_pmu_enable_bts(event->hw.config);
2003 static void intel_pmu_enable_all(int added)
2005 intel_pmu_pebs_enable_all();
2006 __intel_pmu_enable_all(added, false);
2011 * Intel Errata AAK100 (model 26)
2012 * Intel Errata AAP53 (model 30)
2013 * Intel Errata BD53 (model 44)
2015 * The official story:
2016 * These chips need to be 'reset' when adding counters by programming the
2017 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2018 * in sequence on the same PMC or on different PMCs.
2020 * In practice it appears some of these events do in fact count, and
2021 * we need to program all 4 events.
2023 static void intel_pmu_nhm_workaround(void)
2025 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2026 static const unsigned long nhm_magic[4] = {
2032 struct perf_event *event;
2036 * The erratum requires the following steps:
2037 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2038 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2039 * the corresponding PMCx;
2040 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2041 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2042 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2046 * The real steps we choose are a little different from above.
2047 * A) To reduce MSR operations, we don't run step 1) as they
2048 * are already cleared before this function is called;
2049 * B) Call x86_perf_event_update to save PMCx before configuring
2050 * PERFEVTSELx with magic number;
2051 * C) With step 5), we do clear only when the PERFEVTSELx is
2052 * not used currently.
2053 * D) Call x86_perf_event_set_period to restore PMCx;
2056 /* We always operate on 4 pairs of PERF counters */
2057 for (i = 0; i < 4; i++) {
2058 event = cpuc->events[i];
2060 x86_perf_event_update(event);
2063 for (i = 0; i < 4; i++) {
2064 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2065 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2068 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2069 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2071 for (i = 0; i < 4; i++) {
2072 event = cpuc->events[i];
2075 x86_perf_event_set_period(event);
2076 __x86_pmu_enable_event(&event->hw,
2077 ARCH_PERFMON_EVENTSEL_ENABLE);
2079 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2083 static void intel_pmu_nhm_enable_all(int added)
2086 intel_pmu_nhm_workaround();
2087 intel_pmu_enable_all(added);
2090 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2092 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2094 if (cpuc->tfa_shadow != val) {
2095 cpuc->tfa_shadow = val;
2096 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2100 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2103 * We're going to use PMC3, make sure TFA is set before we touch it.
2106 intel_set_tfa(cpuc, true);
2109 static void intel_tfa_pmu_enable_all(int added)
2111 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2114 * If we find PMC3 is no longer used when we enable the PMU, we can
2117 if (!test_bit(3, cpuc->active_mask))
2118 intel_set_tfa(cpuc, false);
2120 intel_pmu_enable_all(added);
2123 static void enable_counter_freeze(void)
2125 update_debugctlmsr(get_debugctlmsr() |
2126 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2129 static void disable_counter_freeze(void)
2131 update_debugctlmsr(get_debugctlmsr() &
2132 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2135 static inline u64 intel_pmu_get_status(void)
2139 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2144 static inline void intel_pmu_ack_status(u64 ack)
2146 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2149 static inline bool event_is_checkpointed(struct perf_event *event)
2151 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2154 static inline void intel_set_masks(struct perf_event *event, int idx)
2156 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2158 if (event->attr.exclude_host)
2159 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2160 if (event->attr.exclude_guest)
2161 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2162 if (event_is_checkpointed(event))
2163 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2166 static inline void intel_clear_masks(struct perf_event *event, int idx)
2168 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2170 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2171 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2172 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2175 static void intel_pmu_disable_fixed(struct perf_event *event)
2177 struct hw_perf_event *hwc = &event->hw;
2181 if (is_topdown_idx(idx)) {
2182 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2185 * When there are other active TopDown events,
2186 * don't disable the fixed counter 3.
2188 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2190 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2193 intel_clear_masks(event, idx);
2195 mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
2196 rdmsrl(hwc->config_base, ctrl_val);
2198 wrmsrl(hwc->config_base, ctrl_val);
2201 static void intel_pmu_disable_event(struct perf_event *event)
2203 struct hw_perf_event *hwc = &event->hw;
2207 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2208 intel_clear_masks(event, idx);
2209 x86_pmu_disable_event(event);
2211 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2212 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2213 intel_pmu_disable_fixed(event);
2215 case INTEL_PMC_IDX_FIXED_BTS:
2216 intel_pmu_disable_bts();
2217 intel_pmu_drain_bts_buffer();
2219 case INTEL_PMC_IDX_FIXED_VLBR:
2220 intel_clear_masks(event, idx);
2223 intel_clear_masks(event, idx);
2224 pr_warn("Failed to disable the event with invalid index %d\n",
2230 * Needs to be called after x86_pmu_disable_event,
2231 * so we don't trigger the event without PEBS bit set.
2233 if (unlikely(event->attr.precise_ip))
2234 intel_pmu_pebs_disable(event);
2237 static void intel_pmu_del_event(struct perf_event *event)
2239 if (needs_branch_stack(event))
2240 intel_pmu_lbr_del(event);
2241 if (event->attr.precise_ip)
2242 intel_pmu_pebs_del(event);
2245 static int icl_set_topdown_event_period(struct perf_event *event)
2247 struct hw_perf_event *hwc = &event->hw;
2248 s64 left = local64_read(&hwc->period_left);
2251 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2252 * Software should start both registers, PERF_METRICS and fixed
2253 * counter 3, from zero.
2254 * Clear PERF_METRICS and Fixed counter 3 in initialization.
2255 * After that, both MSRs will be cleared for each read.
2256 * Don't need to clear them again.
2258 if (left == x86_pmu.max_period) {
2259 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2260 wrmsrl(MSR_PERF_METRICS, 0);
2261 local64_set(&hwc->period_left, 0);
2264 perf_event_update_userpage(event);
2269 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2274 * The metric is reported as an 8-bit integer fraction
2275 * summing up to 0xff.
2276 * slots-in-metric = (Metric / 0xff) * slots
2278 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2279 return mul_u64_u32_div(slots, val, 0xff);
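/*
 * Worked example (illustrative only): if the metric byte for this index is
 * 0x40 and 1000 slots elapsed, then
 *
 *	val   = 0x40;
 *	delta = mul_u64_u32_div(1000, 0x40, 0xff);	// 64000 / 255 = 251
 *
 * i.e. roughly a quarter of the slots are attributed to this metric.
 */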
2282 static void __icl_update_topdown_event(struct perf_event *event,
2283 u64 slots, u64 metrics)
2285 int idx = event->hw.idx;
2288 if (is_metric_idx(idx))
2289 delta = icl_get_metrics_event_value(metrics, slots, idx);
2293 local64_add(delta, &event->count);
2297 * Update all active Topdown events.
2299 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2300 * modified by an NMI. The PMU has to be disabled before calling this function.
2302 static u64 icl_update_topdown_event(struct perf_event *event)
2304 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2305 struct perf_event *other;
2309 /* read Fixed counter 3 */
2310 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2314 /* read PERF_METRICS */
2315 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2317 for_each_set_bit(idx, cpuc->active_mask, INTEL_PMC_IDX_TD_BE_BOUND + 1) {
2318 if (!is_topdown_idx(idx))
2320 other = cpuc->events[idx];
2321 __icl_update_topdown_event(other, slots, metrics);
2325 * Check and update this event, which may have been cleared
2326 * in active_mask, e.g. by x86_pmu_stop().
2328 if (event && !test_bit(event->hw.idx, cpuc->active_mask))
2329 __icl_update_topdown_event(event, slots, metrics);
2331 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2332 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2333 wrmsrl(MSR_PERF_METRICS, 0);
2338 static void intel_pmu_read_topdown_event(struct perf_event *event)
2340 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2342 /* Only need to call update_topdown_event() once for group read. */
2343 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2344 !is_slots_event(event))
2347 perf_pmu_disable(event->pmu);
2348 x86_pmu.update_topdown_event(event);
2349 perf_pmu_enable(event->pmu);
2352 static void intel_pmu_read_event(struct perf_event *event)
2354 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2355 intel_pmu_auto_reload_read(event);
2356 else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
2357 intel_pmu_read_topdown_event(event);
2359 x86_perf_event_update(event);
2362 static void intel_pmu_enable_fixed(struct perf_event *event)
2364 struct hw_perf_event *hwc = &event->hw;
2365 u64 ctrl_val, mask, bits = 0;
2368 if (is_topdown_idx(idx)) {
2369 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2371 * When there are other active TopDown events,
2372 * don't enable the fixed counter 3 again.
2374 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2377 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2380 intel_set_masks(event, idx);
2383 * Enable IRQ generation (0x8), if not PEBS,
2384 * and enable ring-3 counting (0x2) and ring-0 counting (0x1) if requested:
2387 if (!event->attr.precise_ip)
2389 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2391 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2395 * ANY bit is supported in v3 and up
2397 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2400 idx -= INTEL_PMC_IDX_FIXED;
2402 mask = 0xfULL << (idx * 4);
2404 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2405 bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2406 mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2409 rdmsrl(hwc->config_base, ctrl_val);
2412 wrmsrl(hwc->config_base, ctrl_val);
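/*
 * Worked example (illustrative only): a sampling, non-PEBS event counting
 * both rings on fixed counter 0 ends up with
 *
 *	bits = 0x8 | 0x2 | 0x1 = 0xb;	mask = 0xf << (0 * 4);
 *
 * i.e. the low nibble of the fixed-counter control MSR is set to 0xb; fixed
 * counter 1 would use the next nibble (mask 0xf0), and so on.
 */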
2415 static void intel_pmu_enable_event(struct perf_event *event)
2417 struct hw_perf_event *hwc = &event->hw;
2420 if (unlikely(event->attr.precise_ip))
2421 intel_pmu_pebs_enable(event);
2424 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2425 intel_set_masks(event, idx);
2426 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2428 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2429 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2430 intel_pmu_enable_fixed(event);
2432 case INTEL_PMC_IDX_FIXED_BTS:
2433 if (!__this_cpu_read(cpu_hw_events.enabled))
2435 intel_pmu_enable_bts(hwc->config);
2437 case INTEL_PMC_IDX_FIXED_VLBR:
2438 intel_set_masks(event, idx);
2441 pr_warn("Failed to enable the event with invalid index %d\n",
2446 static void intel_pmu_add_event(struct perf_event *event)
2448 if (event->attr.precise_ip)
2449 intel_pmu_pebs_add(event);
2450 if (needs_branch_stack(event))
2451 intel_pmu_lbr_add(event);
2455 * Save and restart an expired event. Called by NMI contexts,
2456 * so it has to be careful about preempting normal event ops:
2458 int intel_pmu_save_and_restart(struct perf_event *event)
2460 x86_perf_event_update(event);
2462 * For a checkpointed counter always reset back to 0. This
2463 * avoids a situation where the counter overflows, aborts the
2464 * transaction and is then set back to shortly before the
2465 * overflow, and overflows and aborts again.
2467 if (unlikely(event_is_checkpointed(event))) {
2468 /* No race with NMIs because the counter should not be armed */
2469 wrmsrl(event->hw.event_base, 0);
2470 local64_set(&event->hw.prev_count, 0);
2472 return x86_perf_event_set_period(event);
2475 static void intel_pmu_reset(void)
2477 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2478 unsigned long flags;
2481 if (!x86_pmu.num_counters)
2484 local_irq_save(flags);
2486 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2488 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2489 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2490 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2492 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2493 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2496 ds->bts_index = ds->bts_buffer_base;
2498 /* Ack all overflows and disable fixed counters */
2499 if (x86_pmu.version >= 2) {
2500 intel_pmu_ack_status(intel_pmu_get_status());
2501 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2504 /* Reset LBRs and LBR freezing */
2505 if (x86_pmu.lbr_nr) {
2506 update_debugctlmsr(get_debugctlmsr() &
2507 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2510 local_irq_restore(flags);
2513 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2515 struct perf_sample_data data;
2516 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2520 inc_irq_stat(apic_perf_irqs);
2523 * Ignore a range of extra bits in status that do not indicate
2524 * overflow by themselves.
2526 status &= ~(GLOBAL_STATUS_COND_CHG |
2527 GLOBAL_STATUS_ASIF |
2528 GLOBAL_STATUS_LBRS_FROZEN);
2532 * In case multiple PEBS events are sampled at the same time,
2533 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2534 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2535 * having their bits set in the status register. This is a sign
2536 * that there was at least one PEBS record pending at the time
2537 * of the PMU interrupt. PEBS counters must only be processed
2538 * via the drain_pebs() calls and not via the regular sample
2539 * processing loop that comes later in this function, otherwise
2540 * phony regular samples may be generated in the sampling buffer
2541 * not marked with the EXACT tag. Another possibility is to have
2542 * one PEBS event and at least one non-PEBS event which overflows
2543 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2544 * not be set, yet the overflow status bit for the PEBS counter will be set.
2547 * To avoid this problem, we systematically ignore the PEBS-enabled
2548 * counters from the GLOBAL_STATUS mask and we always process PEBS
2549 * events via drain_pebs().
2551 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2552 status &= ~cpuc->pebs_enabled;
2554 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2557 * PEBS overflow sets bit 62 in the global status register
2559 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
2560 u64 pebs_enabled = cpuc->pebs_enabled;
2563 x86_pmu.drain_pebs(regs);
2564 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2567 * A PMI throttle may be triggered, which stops the PEBS event.
2568 * Although cpuc->pebs_enabled is updated accordingly, the
2569 * MSR_IA32_PEBS_ENABLE MSR is not, because cpuc->enabled
2570 * has been forced to 0 in the PMI.
2571 * Update the MSR if pebs_enabled is changed.
2573 if (pebs_enabled != cpuc->pebs_enabled)
2574 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
2580 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
2582 if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2583 perf_guest_cbs->handle_intel_pt_intr))
2584 perf_guest_cbs->handle_intel_pt_intr();
2586 intel_pt_interrupt();
2590 * Intel Perf metrics
2592 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
2594 if (x86_pmu.update_topdown_event)
2595 x86_pmu.update_topdown_event(NULL);
2599 * Checkpointed counters can lead to 'spurious' PMIs because the
2600 * rollback caused by the PMI will have cleared the overflow status
2601 * bit. Therefore always force probe these counters.
2603 status |= cpuc->intel_cp_status;
2605 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2606 struct perf_event *event = cpuc->events[bit];
2610 if (!test_bit(bit, cpuc->active_mask))
2613 if (!intel_pmu_save_and_restart(event))
2616 perf_sample_data_init(&data, 0, event->hw.last_period);
2618 if (has_branch_stack(event))
2619 data.br_stack = &cpuc->lbr_stack;
2621 if (perf_event_overflow(event, &data, regs))
2622 x86_pmu_stop(event, 0);
2628 static bool disable_counter_freezing = true;
2629 static int __init intel_perf_counter_freezing_setup(char *s)
2633 if (kstrtobool(s, &res))
2636 disable_counter_freezing = !res;
2639 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
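/*
 * Usage note (as this setup hook is written): counter freezing is off by
 * default. Booting with "perf_v4_pmi=1" sets res to true and therefore
 * clears disable_counter_freezing, opting in to the counter-freezing
 * handling selected elsewhere at init time; "perf_v4_pmi=0" (or omitting
 * the parameter) keeps it disabled.
 */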
2642 * Simplified handler for Arch Perfmon v4:
2643 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2644 * This is done automatically on PMU ack.
2645 * - Ack the PMU only after the APIC.
2648 static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2650 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2654 int pmu_enabled = cpuc->enabled;
2657 /* PMU has been disabled because of counter freezing */
2659 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2661 intel_bts_disable_local();
2662 handled = intel_pmu_drain_bts_buffer();
2663 handled += intel_bts_interrupt();
2665 status = intel_pmu_get_status();
2669 intel_pmu_lbr_read();
2670 if (++loops > 100) {
2674 WARN(1, "perfevents: irq loop stuck!\n");
2675 perf_event_print_debug();
2683 handled += handle_pmi_common(regs, status);
2685 /* Ack the PMI in the APIC */
2686 apic_write(APIC_LVTPC, APIC_DM_NMI);
2689 * The counters start counting immediately while the status is being acked.
2690 * Make it as close as possible to IRET. This avoids bogus
2691 * freezing on Skylake CPUs.
2694 intel_pmu_ack_status(status);
2697 * The CPU may issue two PMIs very close to each other.
2698 * When the PMI handler services the first one, the
2699 * GLOBAL_STATUS is already updated to reflect both.
2700 * When it IRETs, the second PMI is immediately
2701 * handled and it sees a clear status. In the meantime,
2702 * there may be a third PMI, because the freezing bit
2703 * isn't set since the ack in the first PMI handler.
2704 * Double check if there is more work to be done.
2706 status = intel_pmu_get_status();
2712 intel_bts_enable_local();
2713 cpuc->enabled = pmu_enabled;
2718 * This handler is triggered by the local APIC, so the APIC IRQ handling rules apply:
2721 static int intel_pmu_handle_irq(struct pt_regs *regs)
2723 struct cpu_hw_events *cpuc;
2729 cpuc = this_cpu_ptr(&cpu_hw_events);
2732 * Save the PMU state.
2733 * It needs to be restored when leaving the handler.
2735 pmu_enabled = cpuc->enabled;
2737 * There is no known reason to not always do late ACK,
2738 * but just in case make it opt-in.
2740 if (!x86_pmu.late_ack)
2741 apic_write(APIC_LVTPC, APIC_DM_NMI);
2742 intel_bts_disable_local();
2744 __intel_pmu_disable_all();
2745 handled = intel_pmu_drain_bts_buffer();
2746 handled += intel_bts_interrupt();
2747 status = intel_pmu_get_status();
2753 intel_pmu_lbr_read();
2754 intel_pmu_ack_status(status);
2755 if (++loops > 100) {
2759 WARN(1, "perfevents: irq loop stuck!\n");
2760 perf_event_print_debug();
2767 handled += handle_pmi_common(regs, status);
2770 * Repeat if there is more work to be done:
2772 status = intel_pmu_get_status();
2777 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2778 cpuc->enabled = pmu_enabled;
2780 __intel_pmu_enable_all(0, true);
2781 intel_bts_enable_local();
2784 * Only unmask the NMI after the overflow counters
2785 * have been reset. This avoids spurious NMIs on Haswell CPUs.
2788 if (x86_pmu.late_ack)
2789 apic_write(APIC_LVTPC, APIC_DM_NMI);
2793 static struct event_constraint *
2794 intel_bts_constraints(struct perf_event *event)
2796 if (unlikely(intel_pmu_has_bts(event)))
2797 return &bts_constraint;
2803 * Note: matches a fake event, like Fixed2.
2805 static struct event_constraint *
2806 intel_vlbr_constraints(struct perf_event *event)
2808 struct event_constraint *c = &vlbr_constraint;
2810 if (unlikely(constraint_match(c, event->hw.config)))
2816 static int intel_alt_er(int idx, u64 config)
2820 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2823 if (idx == EXTRA_REG_RSP_0)
2824 alt_idx = EXTRA_REG_RSP_1;
2826 if (idx == EXTRA_REG_RSP_1)
2827 alt_idx = EXTRA_REG_RSP_0;
2829 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2835 static void intel_fixup_er(struct perf_event *event, int idx)
2837 event->hw.extra_reg.idx = idx;
2839 if (idx == EXTRA_REG_RSP_0) {
2840 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2841 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2842 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2843 } else if (idx == EXTRA_REG_RSP_1) {
2844 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2845 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2846 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
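/*
 * Illustrative example: on parts with two OFFCORE_RSP MSRs, two offcore
 * events with different config1 values can be scheduled together.
 * intel_alt_er() picks the free register and intel_fixup_er() rewrites the
 * event select to that register's encoding (typically 0x01b7 for RSP_0 and
 * 0x01bb for RSP_1 on such parts), so the programmed event and the chosen
 * MSR stay consistent.
 */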
2851 * Manage allocation of the shared extra MSR for certain events. Sharing can be:
2854 * per-cpu:  shared between the various events on a single PMU
2855 * per-core: per-cpu + shared by HT threads
2857 static struct event_constraint *
2858 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2859 struct perf_event *event,
2860 struct hw_perf_event_extra *reg)
2862 struct event_constraint *c = &emptyconstraint;
2863 struct er_account *era;
2864 unsigned long flags;
2868 * reg->alloc can be set due to existing state, so for fake cpuc we
2869 * need to ignore this, otherwise we might fail to allocate proper fake
2870 * state for this extra reg constraint. Also see the comment below.
2872 if (reg->alloc && !cpuc->is_fake)
2873 return NULL; /* call x86_get_event_constraint() */
2876 era = &cpuc->shared_regs->regs[idx];
2878 * we use spin_lock_irqsave() to avoid lockdep issues when
2879 * passing a fake cpuc
2881 raw_spin_lock_irqsave(&era->lock, flags);
2883 if (!atomic_read(&era->ref) || era->config == reg->config) {
2886 * If it's a fake cpuc -- as per validate_{group,event}() we
2887 * shouldn't touch event state and we can avoid doing so
2888 * since both will only call get_event_constraints() once
2889 * on each event, this avoids the need for reg->alloc.
2891 * Not doing the ER fixup will only result in era->reg being
2892 * wrong, but since we won't actually try and program hardware
2893 * this isn't a problem either.
2895 if (!cpuc->is_fake) {
2896 if (idx != reg->idx)
2897 intel_fixup_er(event, idx);
2900 * x86_schedule_events() can call get_event_constraints()
2901 * multiple times on events in the case of incremental
2902 * scheduling. reg->alloc ensures we only do the ER allocation for each event once.
2908 /* lock in msr value */
2909 era->config = reg->config;
2910 era->reg = reg->reg;
2913 atomic_inc(&era->ref);
2916 * need to call x86_get_event_constraint()
2917 * to check if associated event has constraints
2921 idx = intel_alt_er(idx, reg->config);
2922 if (idx != reg->idx) {
2923 raw_spin_unlock_irqrestore(&era->lock, flags);
2927 raw_spin_unlock_irqrestore(&era->lock, flags);
2933 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2934 struct hw_perf_event_extra *reg)
2936 struct er_account *era;
2939 * Only put constraint if extra reg was actually allocated. Also takes
2940 * care of events which do not use an extra shared reg.
2942 * Also, if this is a fake cpuc we shouldn't touch any event state
2943 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2944 * either since it'll be thrown out.
2946 if (!reg->alloc || cpuc->is_fake)
2949 era = &cpuc->shared_regs->regs[reg->idx];
2951 /* one fewer user */
2952 atomic_dec(&era->ref);
2954 /* allocate again next time */
2958 static struct event_constraint *
2959 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2960 struct perf_event *event)
2962 struct event_constraint *c = NULL, *d;
2963 struct hw_perf_event_extra *xreg, *breg;
2965 xreg = &event->hw.extra_reg;
2966 if (xreg->idx != EXTRA_REG_NONE) {
2967 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2968 if (c == &emptyconstraint)
2971 breg = &event->hw.branch_reg;
2972 if (breg->idx != EXTRA_REG_NONE) {
2973 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2974 if (d == &emptyconstraint) {
2975 __intel_shared_reg_put_constraints(cpuc, xreg);
2982 struct event_constraint *
2983 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2984 struct perf_event *event)
2986 struct event_constraint *c;
2988 if (x86_pmu.event_constraints) {
2989 for_each_event_constraint(c, x86_pmu.event_constraints) {
2990 if (constraint_match(c, event->hw.config)) {
2991 event->hw.flags |= c->flags;
2997 return &unconstrained;
3000 static struct event_constraint *
3001 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3002 struct perf_event *event)
3004 struct event_constraint *c;
3006 c = intel_vlbr_constraints(event);
3010 c = intel_bts_constraints(event);
3014 c = intel_shared_regs_constraints(cpuc, event);
3018 c = intel_pebs_constraints(event);
3022 return x86_get_event_constraints(cpuc, idx, event);
3026 intel_start_scheduling(struct cpu_hw_events *cpuc)
3028 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3029 struct intel_excl_states *xl;
3030 int tid = cpuc->excl_thread_id;
3033 * nothing needed if in group validation mode
3035 if (cpuc->is_fake || !is_ht_workaround_enabled())
3039 * no exclusion needed
3041 if (WARN_ON_ONCE(!excl_cntrs))
3044 xl = &excl_cntrs->states[tid];
3046 xl->sched_started = true;
3048 * lock shared state until we are done scheduling
3049 * in stop_event_scheduling()
3050 * makes scheduling appear as a transaction
3052 raw_spin_lock(&excl_cntrs->lock);
3055 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3057 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3058 struct event_constraint *c = cpuc->event_constraint[idx];
3059 struct intel_excl_states *xl;
3060 int tid = cpuc->excl_thread_id;
3062 if (cpuc->is_fake || !is_ht_workaround_enabled())
3065 if (WARN_ON_ONCE(!excl_cntrs))
3068 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3071 xl = &excl_cntrs->states[tid];
3073 lockdep_assert_held(&excl_cntrs->lock);
3075 if (c->flags & PERF_X86_EVENT_EXCL)
3076 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3078 xl->state[cntr] = INTEL_EXCL_SHARED;
3082 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3084 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3085 struct intel_excl_states *xl;
3086 int tid = cpuc->excl_thread_id;
3089 * nothing needed if in group validation mode
3091 if (cpuc->is_fake || !is_ht_workaround_enabled())
3094 * no exclusion needed
3096 if (WARN_ON_ONCE(!excl_cntrs))
3099 xl = &excl_cntrs->states[tid];
3101 xl->sched_started = false;
3103 * release shared state lock (acquired in intel_start_scheduling())
3105 raw_spin_unlock(&excl_cntrs->lock);
3108 static struct event_constraint *
3109 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3111 WARN_ON_ONCE(!cpuc->constraint_list);
3113 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3114 struct event_constraint *cx;
3117 * grab pre-allocated constraint entry
3119 cx = &cpuc->constraint_list[idx];
3122 * initialize dynamic constraint
3123 * with static constraint
3128 * mark constraint as dynamic
3130 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3137 static struct event_constraint *
3138 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3139 int idx, struct event_constraint *c)
3141 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3142 struct intel_excl_states *xlo;
3143 int tid = cpuc->excl_thread_id;
3147 * validating a group does not require
3148 * enforcing cross-thread exclusion
3150 if (cpuc->is_fake || !is_ht_workaround_enabled())
3154 * no exclusion needed
3156 if (WARN_ON_ONCE(!excl_cntrs))
3160 * because we modify the constraint, we need
3161 * to make a copy. Static constraints come
3162 * from static const tables.
3164 * only needed when constraint has not yet
3165 * been cloned (marked dynamic)
3167 c = dyn_constraint(cpuc, c, idx);
3170 * From here on, the constraint is dynamic.
3171 * Either it was just allocated above, or it
3172 * was allocated during an earlier invocation of this function.
3177 * state of sibling HT
3179 xlo = &excl_cntrs->states[tid ^ 1];
3182 * event requires exclusive counter access
3185 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3186 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3187 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3188 if (!cpuc->n_excl++)
3189 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3193 * Modify the static constraint with the current dynamic state of the sibling thread:
3196 * EXCLUSIVE: sibling counter measuring exclusive event
3197 * SHARED : sibling counter measuring non-exclusive event
3198 * UNUSED : sibling counter unused
3201 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3203 * exclusive event in sibling counter
3204 * our corresponding counter cannot be used
3205 * regardless of our event
3207 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3208 __clear_bit(i, c->idxmsk);
3213 * if measuring an exclusive event, sibling
3214 * measuring non-exclusive, then the counter cannot be used.
3217 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3218 __clear_bit(i, c->idxmsk);
3225 * if we return an empty mask, then switch
3226 * back to static empty constraint to avoid
3227 * the cost of freeing later on
3230 c = &emptyconstraint;
3237 static struct event_constraint *
3238 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3239 struct perf_event *event)
3241 struct event_constraint *c1, *c2;
3243 c1 = cpuc->event_constraint[idx];
3247 * - static constraint: no change across incremental scheduling calls
3248 * - dynamic constraint: handled by intel_get_excl_constraints()
3250 c2 = __intel_get_event_constraints(cpuc, idx, event);
3252 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3253 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3254 c1->weight = c2->weight;
3258 if (cpuc->excl_cntrs)
3259 return intel_get_excl_constraints(cpuc, event, idx, c2);
3264 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3265 struct perf_event *event)
3267 struct hw_perf_event *hwc = &event->hw;
3268 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3269 int tid = cpuc->excl_thread_id;
3270 struct intel_excl_states *xl;
3273 * nothing needed if in group validation mode
3278 if (WARN_ON_ONCE(!excl_cntrs))
3281 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3282 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3283 if (!--cpuc->n_excl)
3284 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3288 * If the event was actually assigned, then mark the counter state as unused.
3291 if (hwc->idx >= 0) {
3292 xl = &excl_cntrs->states[tid];
3295 * put_constraint may be called from x86_schedule_events()
3296 * which already has the lock held, so here make locking conditional.
3299 if (!xl->sched_started)
3300 raw_spin_lock(&excl_cntrs->lock);
3302 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3304 if (!xl->sched_started)
3305 raw_spin_unlock(&excl_cntrs->lock);
3310 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3311 struct perf_event *event)
3313 struct hw_perf_event_extra *reg;
3315 reg = &event->hw.extra_reg;
3316 if (reg->idx != EXTRA_REG_NONE)
3317 __intel_shared_reg_put_constraints(cpuc, reg);
3319 reg = &event->hw.branch_reg;
3320 if (reg->idx != EXTRA_REG_NONE)
3321 __intel_shared_reg_put_constraints(cpuc, reg);
3324 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3325 struct perf_event *event)
3327 intel_put_shared_regs_event_constraints(cpuc, event);
3330 * If the PMU has exclusive counter restrictions, then
3331 * all events are subject to them and must call the
3332 * put_excl_constraints() routine.
3334 if (cpuc->excl_cntrs)
3335 intel_put_excl_constraints(cpuc, event);
3338 static void intel_pebs_aliases_core2(struct perf_event *event)
3340 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3342 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3343 * (0x003c) so that we can use it with PEBS.
3345 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3346 * PEBS capable. However we can use INST_RETIRED.ANY_P
3347 * (0x00c0), which is a PEBS capable event, to get the same
3350 * INST_RETIRED.ANY_P counts the number of cycles that retires
3351 * CNTMASK instructions. By setting CNTMASK to a value (16)
3352 * larger than the maximum number of instructions that can be
3353 * retired per cycle (4) and then inverting the condition, we
3354 * count all cycles that retire 16 or fewer instructions, which is every cycle.
3357 * Thereby we gain a PEBS capable cycle counter.
3359 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3361 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3362 event->hw.config = alt_config;
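/*
 * For illustration (assuming the standard PERFEVTSEL layout), the alternative
 * encoding above expands to
 *
 *	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) == 0x108000c0
 *
 * (cmask=16 in bits 31:24, inv in bit 23, event 0xc0 in bits 7:0) before the
 * event's original non-raw flag bits are merged back in.
 */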
3366 static void intel_pebs_aliases_snb(struct perf_event *event)
3368 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3370 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3371 * (0x003c) so that we can use it with PEBS.
3373 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3374 * PEBS capable. However we can use UOPS_RETIRED.ALL
3375 * (0x01c2), which is a PEBS capable event, to get the same
3378 * UOPS_RETIRED.ALL counts the number of cycles that retires
3379 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3380 * larger than the maximum number of micro-ops that can be
3381 * retired per cycle (4) and then inverting the condition, we
3382 * count all cycles that retire 16 or fewer micro-ops, which is every cycle.
3385 * Thereby we gain a PEBS capable cycle counter.
3387 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3389 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3390 event->hw.config = alt_config;
3394 static void intel_pebs_aliases_precdist(struct perf_event *event)
3396 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3398 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3399 * (0x003c) so that we can use it with PEBS.
3401 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3402 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3403 * (0x01c0), which is a PEBS capable event, to get the same
3406 * The PREC_DIST event has special support to minimize sample
3407 * shadowing effects. One drawback is that it can be
3408 * only programmed on counter 1, but that seems like an
3409 * acceptable trade off.
3411 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3413 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3414 event->hw.config = alt_config;
3418 static void intel_pebs_aliases_ivb(struct perf_event *event)
3420 if (event->attr.precise_ip < 3)
3421 return intel_pebs_aliases_snb(event);
3422 return intel_pebs_aliases_precdist(event);
3425 static void intel_pebs_aliases_skl(struct perf_event *event)
3427 if (event->attr.precise_ip < 3)
3428 return intel_pebs_aliases_core2(event);
3429 return intel_pebs_aliases_precdist(event);
3432 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3434 unsigned long flags = x86_pmu.large_pebs_flags;
3436 if (event->attr.use_clockid)
3437 flags &= ~PERF_SAMPLE_TIME;
3438 if (!event->attr.exclude_kernel)
3439 flags &= ~PERF_SAMPLE_REGS_USER;
3440 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3441 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
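/*
 * Example of how this interacts with intel_pmu_hw_config() below: an event
 * with attr.use_clockid set loses PERF_SAMPLE_TIME from the allowed set, so
 * if its sample_type requests time it no longer qualifies for
 * PERF_X86_EVENT_LARGE_PEBS and effectively falls back to single-record PEBS.
 */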
3445 static int intel_pmu_bts_config(struct perf_event *event)
3447 struct perf_event_attr *attr = &event->attr;
3449 if (unlikely(intel_pmu_has_bts(event))) {
3450 /* BTS is not supported by this architecture. */
3451 if (!x86_pmu.bts_active)
3454 /* BTS is currently only allowed for user-mode. */
3455 if (!attr->exclude_kernel)
3458 /* BTS is not allowed for precise events. */
3459 if (attr->precise_ip)
3462 /* disallow bts if conflicting events are present */
3463 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3466 event->destroy = hw_perf_lbr_event_destroy;
3472 static int core_pmu_hw_config(struct perf_event *event)
3474 int ret = x86_pmu_hw_config(event);
3479 return intel_pmu_bts_config(event);
3482 static int intel_pmu_hw_config(struct perf_event *event)
3484 int ret = x86_pmu_hw_config(event);
3489 ret = intel_pmu_bts_config(event);
3493 if (event->attr.precise_ip) {
3494 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3495 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3496 if (!(event->attr.sample_type &
3497 ~intel_pmu_large_pebs_flags(event)))
3498 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3500 if (x86_pmu.pebs_aliases)
3501 x86_pmu.pebs_aliases(event);
3503 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3504 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3507 if (needs_branch_stack(event)) {
3508 ret = intel_pmu_setup_lbr_filter(event);
3513 * BTS is set up earlier in this path, so don't account twice
3515 if (!unlikely(intel_pmu_has_bts(event))) {
3516 /* disallow lbr if conflicting events are present */
3517 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3520 event->destroy = hw_perf_lbr_event_destroy;
3524 if (event->attr.aux_output) {
3525 if (!event->attr.precise_ip)
3528 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3531 if (event->attr.type != PERF_TYPE_RAW)
3535 * Config Topdown slots and metric events
3537 * The slots event on Fixed Counter 3 can support sampling,
3538 * which will be handled normally in x86_perf_event_update().
3540 * Metric events don't support sampling and require being paired
3541 * with a slots event as group leader. When the slots event
3542 * is used in a metrics group, it too cannot support sampling.
3544 if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) {
3545 if (event->attr.config1 || event->attr.config2)
3549 * The TopDown metrics events and slots event don't
3550 * support any filters.
3552 if (event->attr.config & X86_ALL_EVENT_FLAGS)
3555 if (is_metric_event(event)) {
3556 struct perf_event *leader = event->group_leader;
3558 /* The metric events don't support sampling. */
3559 if (is_sampling_event(event))
3562 /* The metric events require a slots group leader. */
3563 if (!is_slots_event(leader))
3567 * The leader/SLOTS must not be a sampling event for
3568 * metric use; hardware requires it starts at 0 when used
3569 * in conjunction with MSR_PERF_METRICS.
3571 if (is_sampling_event(leader))
3574 event->event_caps |= PERF_EV_CAP_SIBLING;
3576 * Only once we have a METRICs sibling do we
3577 * need TopDown magic.
3579 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3580 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3582 event->hw.flags &= ~PERF_X86_EVENT_RDPMC_ALLOWED;
3586 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3589 if (x86_pmu.version < 3)
3592 ret = perf_allow_cpu(&event->attr);
3596 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
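/*
 * Usage sketch (illustrative, assuming the usual sysfs event names): the
 * rules above mean a metrics group must look like
 *
 *	perf stat -e '{slots,topdown-retiring,topdown-bad-spec}' ...
 *
 * with the non-sampling slots event as group leader; a metric event opened
 * on its own, or under a sampling leader, is rejected by the checks above.
 */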
3601 #ifdef CONFIG_RETPOLINE
3602 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr);
3603 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr);
3606 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3608 #ifdef CONFIG_RETPOLINE
3609 if (x86_pmu.guest_get_msrs == intel_guest_get_msrs)
3610 return intel_guest_get_msrs(nr);
3611 else if (x86_pmu.guest_get_msrs == core_guest_get_msrs)
3612 return core_guest_get_msrs(nr);
3614 if (x86_pmu.guest_get_msrs)
3615 return x86_pmu.guest_get_msrs(nr);
3619 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3621 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3623 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3624 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3626 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3627 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3628 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3629 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3630 arr[0].guest &= ~cpuc->pebs_enabled;
3632 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3635 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3637 * If PMU counter has PEBS enabled it is not enough to
3638 * disable counter on a guest entry since PEBS memory
3639 * write can overshoot guest entry and corrupt guest
3640 * memory. Disabling PEBS solves the problem.
3642 * Don't do this if the CPU already enforces it.
3644 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3645 arr[1].host = cpuc->pebs_enabled;
3653 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3655 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3656 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3659 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3660 struct perf_event *event = cpuc->events[idx];
3662 arr[idx].msr = x86_pmu_config_addr(idx);
3663 arr[idx].host = arr[idx].guest = 0;
3665 if (!test_bit(idx, cpuc->active_mask))
3668 arr[idx].host = arr[idx].guest =
3669 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3671 if (event->attr.exclude_host)
3672 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3673 else if (event->attr.exclude_guest)
3674 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3677 *nr = x86_pmu.num_counters;
3681 static void core_pmu_enable_event(struct perf_event *event)
3683 if (!event->attr.exclude_host)
3684 x86_pmu_enable_event(event);
3687 static void core_pmu_enable_all(int added)
3689 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3692 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3693 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3695 if (!test_bit(idx, cpuc->active_mask) ||
3696 cpuc->events[idx]->attr.exclude_host)
3699 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3703 static int hsw_hw_config(struct perf_event *event)
3705 int ret = intel_pmu_hw_config(event);
3709 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3711 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3714 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3715 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3718 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3719 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3720 event->attr.precise_ip > 0))
3723 if (event_is_checkpointed(event)) {
3725 * Sampling of checkpointed events can cause situations where
3726 * the CPU constantly aborts because of an overflow, which is
3727 * then checkpointed back and ignored. Forbid checkpointing
3730 * for sampling, but still allow a long sampling period so that perf stat still works.
3733 if (event->attr.sample_period > 0 &&
3734 event->attr.sample_period < 0x7fffffff)
3740 static struct event_constraint counter0_constraint =
3741 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3743 static struct event_constraint counter2_constraint =
3744 EVENT_CONSTRAINT(0, 0x4, 0);
3746 static struct event_constraint fixed0_constraint =
3747 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3749 static struct event_constraint fixed0_counter0_constraint =
3750 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
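/*
 * For reference (assuming INTEL_PMC_IDX_FIXED == 32): counter0_constraint
 * allows only GP counter 0 (mask 0x1), counter2_constraint only GP counter 2
 * (mask 0x4), and fixed0_counter0_constraint allows GP counter 0 or fixed
 * counter 0 (mask 0x100000001ULL == bit 0 | bit 32).
 */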
3752 static struct event_constraint *
3753 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3754 struct perf_event *event)
3756 struct event_constraint *c;
3758 c = intel_get_event_constraints(cpuc, idx, event);
3760 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3761 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3762 if (c->idxmsk64 & (1U << 2))
3763 return &counter2_constraint;
3764 return &emptyconstraint;
3770 static struct event_constraint *
3771 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3772 struct perf_event *event)
3775 * Fixed counter 0 has less skid.
3776 * Force instruction:ppp in Fixed counter 0
3778 if ((event->attr.precise_ip == 3) &&
3779 constraint_match(&fixed0_constraint, event->hw.config))
3780 return &fixed0_constraint;
3782 return hsw_get_event_constraints(cpuc, idx, event);
3785 static struct event_constraint *
3786 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3787 struct perf_event *event)
3789 struct event_constraint *c;
3791 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3792 if (event->attr.precise_ip == 3)
3793 return &counter0_constraint;
3795 c = intel_get_event_constraints(cpuc, idx, event);
3800 static struct event_constraint *
3801 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3802 struct perf_event *event)
3804 struct event_constraint *c;
3807 * :ppp means to do reduced skid PEBS,
3808 * which is available on PMC0 and fixed counter 0.
3810 if (event->attr.precise_ip == 3) {
3811 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
3812 if (constraint_match(&fixed0_constraint, event->hw.config))
3813 return &fixed0_counter0_constraint;
3815 return &counter0_constraint;
3818 c = intel_get_event_constraints(cpuc, idx, event);
3823 static bool allow_tsx_force_abort = true;
3825 static struct event_constraint *
3826 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3827 struct perf_event *event)
3829 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3832 * Without TFA we must not use PMC3.
3834 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
3835 c = dyn_constraint(cpuc, c, idx);
3836 c->idxmsk64 &= ~(1ULL << 3);
3846 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3847 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3848 * the two to enforce a minimum period of 128 (the smallest value that has bits
3849 * 0-5 cleared and >= 100).
3851 * Because of how the code in x86_perf_event_set_period() works, the truncation
3852 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3853 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3855 * Therefore the effective (average) period matches the requested period,
3856 * despite coarser hardware granularity.
3858 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3860 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3861 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3869 static u64 nhm_limit_period(struct perf_event *event, u64 left)
3871 return max(left, 32ULL);
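/*
 * Worked examples (illustrative only), following the comments above: on
 * Broadwell a requested INST_RETIRED.ALL period of 100 is raised to 128,
 * and a period of 200 is programmed as 192, with x86_perf_event_set_period()
 * carrying the remainder so the average period still matches the request.
 * On Nehalem any period below 32 is simply raised to 32.
 */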
3874 PMU_FORMAT_ATTR(event, "config:0-7" );
3875 PMU_FORMAT_ATTR(umask, "config:8-15" );
3876 PMU_FORMAT_ATTR(edge, "config:18" );
3877 PMU_FORMAT_ATTR(pc, "config:19" );
3878 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3879 PMU_FORMAT_ATTR(inv, "config:23" );
3880 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3881 PMU_FORMAT_ATTR(in_tx, "config:32");
3882 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
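/*
 * Usage sketch (illustrative): these format attributes are what make raw
 * event syntax such as
 *
 *	perf stat -e cpu/event=0xc0,umask=0x1,inv,cmask=16/
 *
 * resolve into the config bit ranges declared above (event -> bits 7:0,
 * umask -> 15:8, inv -> 23, cmask -> 31:24).
 */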
3884 static struct attribute *intel_arch_formats_attr[] = {
3885 &format_attr_event.attr,
3886 &format_attr_umask.attr,
3887 &format_attr_edge.attr,
3888 &format_attr_pc.attr,
3889 &format_attr_inv.attr,
3890 &format_attr_cmask.attr,
3894 ssize_t intel_event_sysfs_show(char *page, u64 config)
3896 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3898 return x86_event_sysfs_show(page, config, event);
3901 static struct intel_shared_regs *allocate_shared_regs(int cpu)
3903 struct intel_shared_regs *regs;
3906 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3907 GFP_KERNEL, cpu_to_node(cpu));
3910 * initialize the locks to keep lockdep happy
3912 for (i = 0; i < EXTRA_REG_MAX; i++)
3913 raw_spin_lock_init(&regs->regs[i].lock);
3920 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3922 struct intel_excl_cntrs *c;
3924 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3925 GFP_KERNEL, cpu_to_node(cpu));
3927 raw_spin_lock_init(&c->lock);
3934 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3936 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
3938 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3939 cpuc->shared_regs = allocate_shared_regs(cpu);
3940 if (!cpuc->shared_regs)
3944 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3945 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3947 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3948 if (!cpuc->constraint_list)
3949 goto err_shared_regs;
3952 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3953 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3954 if (!cpuc->excl_cntrs)
3955 goto err_constraint_list;
3957 cpuc->excl_thread_id = 0;
3962 err_constraint_list:
3963 kfree(cpuc->constraint_list);
3964 cpuc->constraint_list = NULL;
3967 kfree(cpuc->shared_regs);
3968 cpuc->shared_regs = NULL;
3974 static int intel_pmu_cpu_prepare(int cpu)
3976 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3979 static void flip_smm_bit(void *data)
3981 unsigned long set = *(unsigned long *)data;
3984 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3985 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3987 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3988 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3992 static void intel_pmu_cpu_starting(int cpu)
3994 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3995 int core_id = topology_core_id(cpu);
3998 init_debug_store_on_cpu(cpu);
4000 * Deal with CPUs that don't clear their LBRs on power-up.
4002 intel_pmu_lbr_reset();
4004 cpuc->lbr_sel = NULL;
4006 if (x86_pmu.flags & PMU_FL_TFA) {
4007 WARN_ON_ONCE(cpuc->tfa_shadow);
4008 cpuc->tfa_shadow = ~0ULL;
4009 intel_set_tfa(cpuc, false);
4012 if (x86_pmu.version > 1)
4013 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4015 if (x86_pmu.counter_freezing)
4016 enable_counter_freeze();
4018 if (!cpuc->shared_regs)
4021 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4022 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4023 struct intel_shared_regs *pc;
4025 pc = per_cpu(cpu_hw_events, i).shared_regs;
4026 if (pc && pc->core_id == core_id) {
4027 cpuc->kfree_on_online[0] = cpuc->shared_regs;
4028 cpuc->shared_regs = pc;
4032 cpuc->shared_regs->core_id = core_id;
4033 cpuc->shared_regs->refcnt++;
4036 if (x86_pmu.lbr_sel_map)
4037 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4039 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4040 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4041 struct cpu_hw_events *sibling;
4042 struct intel_excl_cntrs *c;
4044 sibling = &per_cpu(cpu_hw_events, i);
4045 c = sibling->excl_cntrs;
4046 if (c && c->core_id == core_id) {
4047 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4048 cpuc->excl_cntrs = c;
4049 if (!sibling->excl_thread_id)
4050 cpuc->excl_thread_id = 1;
4054 cpuc->excl_cntrs->core_id = core_id;
4055 cpuc->excl_cntrs->refcnt++;
4059 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4061 struct intel_excl_cntrs *c;
4063 c = cpuc->excl_cntrs;
4065 if (c->core_id == -1 || --c->refcnt == 0)
4067 cpuc->excl_cntrs = NULL;
4070 kfree(cpuc->constraint_list);
4071 cpuc->constraint_list = NULL;
4074 static void intel_pmu_cpu_dying(int cpu)
4076 fini_debug_store_on_cpu(cpu);
4078 if (x86_pmu.counter_freezing)
4079 disable_counter_freeze();
4082 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4084 struct intel_shared_regs *pc;
4086 pc = cpuc->shared_regs;
4088 if (pc->core_id == -1 || --pc->refcnt == 0)
4090 cpuc->shared_regs = NULL;
4093 free_excl_cntrs(cpuc);
4096 static void intel_pmu_cpu_dead(int cpu)
4098 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
4101 static void intel_pmu_sched_task(struct perf_event_context *ctx,
4104 intel_pmu_pebs_sched_task(ctx, sched_in);
4105 intel_pmu_lbr_sched_task(ctx, sched_in);
4108 static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
4109 struct perf_event_context *next)
4111 intel_pmu_lbr_swap_task_ctx(prev, next);
4114 static int intel_pmu_check_period(struct perf_event *event, u64 value)
4116 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4119 static int intel_pmu_aux_output_match(struct perf_event *event)
4121 if (!x86_pmu.intel_cap.pebs_output_pt_available)
4124 return is_intel_pt_event(event);
4127 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
4129 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
4131 PMU_FORMAT_ATTR(frontend, "config1:0-23");
4133 static struct attribute *intel_arch3_formats_attr[] = {
4134 &format_attr_event.attr,
4135 &format_attr_umask.attr,
4136 &format_attr_edge.attr,
4137 &format_attr_pc.attr,
4138 &format_attr_any.attr,
4139 &format_attr_inv.attr,
4140 &format_attr_cmask.attr,
4144 static struct attribute *hsw_format_attr[] = {
4145 &format_attr_in_tx.attr,
4146 &format_attr_in_tx_cp.attr,
4147 &format_attr_offcore_rsp.attr,
4148 &format_attr_ldlat.attr,
4152 static struct attribute *nhm_format_attr[] = {
4153 &format_attr_offcore_rsp.attr,
4154 &format_attr_ldlat.attr,
4158 static struct attribute *slm_format_attr[] = {
4159 &format_attr_offcore_rsp.attr,
4163 static struct attribute *skl_format_attr[] = {
4164 &format_attr_frontend.attr,
4168 static __initconst const struct x86_pmu core_pmu = {
4170 .handle_irq = x86_pmu_handle_irq,
4171 .disable_all = x86_pmu_disable_all,
4172 .enable_all = core_pmu_enable_all,
4173 .enable = core_pmu_enable_event,
4174 .disable = x86_pmu_disable_event,
4175 .hw_config = core_pmu_hw_config,
4176 .schedule_events = x86_schedule_events,
4177 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4178 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4179 .event_map = intel_pmu_event_map,
4180 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4182 .large_pebs_flags = LARGE_PEBS_FLAGS,
4185 * Intel PMCs cannot be accessed sanely above 32-bit width,
4186 * so we install an artificial 1<<31 period regardless of
4187 * the generic event period:
4189 .max_period = (1ULL<<31) - 1,
4190 .get_event_constraints = intel_get_event_constraints,
4191 .put_event_constraints = intel_put_event_constraints,
4192 .event_constraints = intel_core_event_constraints,
4193 .guest_get_msrs = core_guest_get_msrs,
4194 .format_attrs = intel_arch_formats_attr,
4195 .events_sysfs_show = intel_event_sysfs_show,
4198 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
4199 * together with PMU version 1 and thus be using core_pmu with
4200 * shared_regs. We need the following callbacks here to allocate it properly.
4203 .cpu_prepare = intel_pmu_cpu_prepare,
4204 .cpu_starting = intel_pmu_cpu_starting,
4205 .cpu_dying = intel_pmu_cpu_dying,
4206 .cpu_dead = intel_pmu_cpu_dead,
4208 .check_period = intel_pmu_check_period,
4210 .lbr_reset = intel_pmu_lbr_reset_64,
4211 .lbr_read = intel_pmu_lbr_read_64,
4212 .lbr_save = intel_pmu_lbr_save,
4213 .lbr_restore = intel_pmu_lbr_restore,
4216 static __initconst const struct x86_pmu intel_pmu = {
4218 .handle_irq = intel_pmu_handle_irq,
4219 .disable_all = intel_pmu_disable_all,
4220 .enable_all = intel_pmu_enable_all,
4221 .enable = intel_pmu_enable_event,
4222 .disable = intel_pmu_disable_event,
4223 .add = intel_pmu_add_event,
4224 .del = intel_pmu_del_event,
4225 .read = intel_pmu_read_event,
4226 .hw_config = intel_pmu_hw_config,
4227 .schedule_events = x86_schedule_events,
4228 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4229 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4230 .event_map = intel_pmu_event_map,
4231 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4233 .large_pebs_flags = LARGE_PEBS_FLAGS,
4235 * Intel PMCs cannot be accessed sanely above 32 bit width,
4236 * so we install an artificial 1<<31 period regardless of
4237 * the generic event period:
4239 .max_period = (1ULL << 31) - 1,
4240 .get_event_constraints = intel_get_event_constraints,
4241 .put_event_constraints = intel_put_event_constraints,
4242 .pebs_aliases = intel_pebs_aliases_core2,
4244 .format_attrs = intel_arch3_formats_attr,
4245 .events_sysfs_show = intel_event_sysfs_show,
4247 .cpu_prepare = intel_pmu_cpu_prepare,
4248 .cpu_starting = intel_pmu_cpu_starting,
4249 .cpu_dying = intel_pmu_cpu_dying,
4250 .cpu_dead = intel_pmu_cpu_dead,
4252 .guest_get_msrs = intel_guest_get_msrs,
4253 .sched_task = intel_pmu_sched_task,
4254 .swap_task_ctx = intel_pmu_swap_task_ctx,
4256 .check_period = intel_pmu_check_period,
4258 .aux_output_match = intel_pmu_aux_output_match,
4260 .lbr_reset = intel_pmu_lbr_reset_64,
4261 .lbr_read = intel_pmu_lbr_read_64,
4262 .lbr_save = intel_pmu_lbr_save,
4263 .lbr_restore = intel_pmu_lbr_restore,
4266 static __init void intel_clovertown_quirk(void)
4269 * PEBS is unreliable due to:
4271 * AJ67 - PEBS may experience CPL leaks
4272 * AJ68 - PEBS PMI may be delayed by one event
4273 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
4274 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
4276 * AJ67 could be worked around by restricting the OS/USR flags.
4277 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
4279 * AJ106 could possibly be worked around by not allowing LBR
4280 * usage from PEBS, including the fixup.
4281 * AJ68 could possibly be worked around by always programming
4282 * a pebs_event_reset[0] value and coping with the lost events.
4284 * But taken together it might just make sense to not enable PEBS on these chips.
4287 pr_warn("PEBS disabled due to CPU errata\n");
4289 x86_pmu.pebs_constraints = NULL;
4292 static const struct x86_cpu_desc isolation_ucodes[] = {
4293 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
4294 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
4295 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
4296 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
4297 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
4298 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
4299 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
4300 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
4301 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
4302 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
4303 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
4304 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
4305 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
4306 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
4307 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
4308 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
4309 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
4310 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
4311 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
4312 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
4313 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
4314 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
4315 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
4316 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
4317 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
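/*
 * Each INTEL_CPU_DESC() entry above is (model, stepping, minimum
 * microcode revision). x86_cpu_has_min_microcode_rev() reports false
 * only when the booted CPU matches a listed model/stepping and runs a
 * microcode revision older than the one listed, so pebs_no_isolation
 * gets set only on parts that still need a microcode update.
 */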
4321 static void intel_check_pebs_isolation(void)
4323 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4326 static __init void intel_pebs_isolation_quirk(void)
4328 WARN_ON_ONCE(x86_pmu.check_microcode);
4329 x86_pmu.check_microcode = intel_check_pebs_isolation;
4330 intel_check_pebs_isolation();
4333 static const struct x86_cpu_desc pebs_ucodes[] = {
4334 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
4335 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
4336 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
4340 static bool intel_snb_pebs_broken(void)
4342 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4345 static void intel_snb_check_microcode(void)
4347 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4351 * Serialized by the microcode lock.
4353 if (x86_pmu.pebs_broken) {
4354 pr_info("PEBS enabled due to microcode update\n");
4355 x86_pmu.pebs_broken = 0;
4357 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4358 x86_pmu.pebs_broken = 1;
4362 static bool is_lbr_from(unsigned long msr)
4364 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4366 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4370 * Under certain circumstances, accessing certain MSRs may cause a #GP.
4371 * This function tests whether the given MSR can be safely accessed.
4373 static bool check_msr(unsigned long msr, u64 mask)
4375 u64 val_old, val_new, val_tmp;
4378 * Disable the check for real HW, so we don't
4379 * mess with potentially enabled registers:
4381 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4385 * Read the current value, change it and read it back to see if it
4386 * matches; this is needed to detect certain hardware emulators
4387 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
4389 if (rdmsrl_safe(msr, &val_old))
4393 * Only change the bits which can be updated by wrmsrl.
4395 val_tmp = val_old ^ mask;
4397 if (is_lbr_from(msr))
4398 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4400 if (wrmsrl_safe(msr, val_tmp) ||
4401 rdmsrl_safe(msr, &val_new))
4405 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4406 * should equal rdmsrl()'s even with the quirk.
4408 if (val_new != val_tmp)
4411 if (is_lbr_from(msr))
4412 val_old = lbr_from_signext_quirk_wr(val_old);
4414 /* At this point the MSR is known to be safely accessible.
4415 * Restore the old value and return.
4417 wrmsrl(msr, val_old);
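/*
 * A minimal usage sketch (illustrative; this exact call is not in the
 * source): probing one extra_reg MSR from init code could look like
 *
 *	if (!check_msr(MSR_OFFCORE_RSP_0, 0x11UL))
 *		pr_info("offcore response MSR unusable, likely emulated\n");
 *
 * The mask picks a couple of valid, writable low bits, so an emulated,
 * always-zero MSR is detected without disturbing the rest of the
 * register. The real callers in intel_pmu_init() use the same pattern
 * for the LBR and extra_reg MSRs.
 */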
4422 static __init void intel_sandybridge_quirk(void)
4424 x86_pmu.check_microcode = intel_snb_check_microcode;
4426 intel_snb_check_microcode();
4430 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4431 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4432 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4433 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4434 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4435 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4436 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4437 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
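/*
 * x86_pmu.events_mask below is filled from CPUID.0AH:EBX (see
 * intel_pmu_init()); a set bit there means the corresponding
 * architectural event is *not* available, which is why the quirk walks
 * the set bits and zeroes the matching intel_perfmon_event_map[]
 * entries.
 */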
4440 static __init void intel_arch_events_quirk(void)
4444 /* disable events that CPUID reports as not present */
4445 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4446 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4447 pr_warn("CPUID marked event: \'%s\' unavailable\n",
4448 intel_arch_events_map[bit].name);
4452 static __init void intel_nehalem_quirk(void)
4454 union cpuid10_ebx ebx;
4456 ebx.full = x86_pmu.events_maskl;
4457 if (ebx.split.no_branch_misses_retired) {
4459 * Erratum AAJ80 detected, we work it around by using
4460 * the BR_MISP_EXEC.ANY event. This will over-count
4461 * branch-misses, but it's still much better than the
4462 * architectural event which is often completely bogus:
4464 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4465 ebx.split.no_branch_misses_retired = 0;
4466 x86_pmu.events_maskl = ebx.full;
4467 pr_info("CPU erratum AAJ80 worked around\n");
4471 static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4472 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
4473 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
4474 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
4475 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_D, 1, 0x00000028),
4476 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
4477 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
4481 static bool intel_counter_freezing_broken(void)
4483 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4486 static __init void intel_counter_freezing_quirk(void)
4488 /* Check if it's already disabled */
4489 if (disable_counter_freezing)
4493 * If the system starts with the wrong ucode, leave the
4494 * counter-freezing feature permanently disabled.
4496 if (intel_counter_freezing_broken()) {
4497 pr_info("PMU counter freezing disabled due to CPU errata,"
4498 "please upgrade microcode\n");
4499 x86_pmu.counter_freezing = false;
4500 x86_pmu.handle_irq = intel_pmu_handle_irq;
4505 * enable software workaround for errata: SNB: BJ122, IVB: BV98, HSW: HSD29
4510 * Only needed when HT is enabled. However, detecting
4511 * whether HT is enabled is difficult (model specific). So instead,
4512 * we enable the workaround in early boot, and verify whether
4513 * it is needed in a later initcall phase, once we have valid
4514 * topology information to check if HT is actually enabled.
4516 static __init void intel_ht_bug(void)
4518 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4520 x86_pmu.start_scheduling = intel_start_scheduling;
4521 x86_pmu.commit_scheduling = intel_commit_scheduling;
4522 x86_pmu.stop_scheduling = intel_stop_scheduling;
4525 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4526 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
4528 /* Haswell special events */
4529 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4530 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4531 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4532 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4533 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4534 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4535 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4536 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4537 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4538 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4539 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4540 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
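/*
 * The strings above become named events under
 * /sys/bus/event_source/devices/cpu/events/, so the TSX and memory
 * events can be requested by name; for example (illustrative command
 * lines, not part of this file):
 *
 *	perf stat -e cpu/tx-start/,cpu/tx-abort/ -a -- sleep 1
 *	perf stat -e cpu/cycles-ct/ -- ./workload
 */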
4542 static struct attribute *hsw_events_attrs[] = {
4543 EVENT_PTR(td_slots_issued),
4544 EVENT_PTR(td_slots_retired),
4545 EVENT_PTR(td_fetch_bubbles),
4546 EVENT_PTR(td_total_slots),
4547 EVENT_PTR(td_total_slots_scale),
4548 EVENT_PTR(td_recovery_bubbles),
4549 EVENT_PTR(td_recovery_bubbles_scale),
4553 static struct attribute *hsw_mem_events_attrs[] = {
4554 EVENT_PTR(mem_ld_hsw),
4555 EVENT_PTR(mem_st_hsw),
4559 static struct attribute *hsw_tsx_events_attrs[] = {
4560 EVENT_PTR(tx_start),
4561 EVENT_PTR(tx_commit),
4562 EVENT_PTR(tx_abort),
4563 EVENT_PTR(tx_capacity),
4564 EVENT_PTR(tx_conflict),
4565 EVENT_PTR(el_start),
4566 EVENT_PTR(el_commit),
4567 EVENT_PTR(el_abort),
4568 EVENT_PTR(el_capacity),
4569 EVENT_PTR(el_conflict),
4570 EVENT_PTR(cycles_t),
4571 EVENT_PTR(cycles_ct),
4575 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
4576 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4577 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
4578 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4580 static struct attribute *icl_events_attrs[] = {
4581 EVENT_PTR(mem_ld_hsw),
4582 EVENT_PTR(mem_st_hsw),
4586 static struct attribute *icl_td_events_attrs[] = {
4588 EVENT_PTR(td_retiring),
4589 EVENT_PTR(td_bad_spec),
4590 EVENT_PTR(td_fe_bound),
4591 EVENT_PTR(td_be_bound),
4595 static struct attribute *icl_tsx_events_attrs[] = {
4596 EVENT_PTR(tx_start),
4597 EVENT_PTR(tx_abort),
4598 EVENT_PTR(tx_commit),
4599 EVENT_PTR(tx_capacity_read),
4600 EVENT_PTR(tx_capacity_write),
4601 EVENT_PTR(tx_conflict),
4602 EVENT_PTR(el_start),
4603 EVENT_PTR(el_abort),
4604 EVENT_PTR(el_commit),
4605 EVENT_PTR(el_capacity_read),
4606 EVENT_PTR(el_capacity_write),
4607 EVENT_PTR(el_conflict),
4608 EVENT_PTR(cycles_t),
4609 EVENT_PTR(cycles_ct),
4613 static ssize_t freeze_on_smi_show(struct device *cdev,
4614 struct device_attribute *attr,
4617 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4620 static DEFINE_MUTEX(freeze_on_smi_mutex);
4622 static ssize_t freeze_on_smi_store(struct device *cdev,
4623 struct device_attribute *attr,
4624 const char *buf, size_t count)
4629 ret = kstrtoul(buf, 0, &val);
4636 mutex_lock(&freeze_on_smi_mutex);
4638 if (x86_pmu.attr_freeze_on_smi == val)
4641 x86_pmu.attr_freeze_on_smi = val;
4644 on_each_cpu(flip_smm_bit, &val, 1);
4647 mutex_unlock(&freeze_on_smi_mutex);
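/*
 * freeze_on_smi is a writable sysfs attribute (see DEVICE_ATTR_RW()
 * below); for example (illustrative shell usage):
 *
 *	echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi
 *
 * Changing the value runs flip_smm_bit() on every CPU, which updates
 * the FREEZE_WHILE_SMM bit in IA32_DEBUGCTL so the counters do not
 * count while the CPU is handling SMIs.
 */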
4652 static void update_tfa_sched(void *ignored)
4654 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4657 * check if PMC3 is used
4658 * and, if so, force a schedule out for all event types in all contexts
4660 if (test_bit(3, cpuc->active_mask))
4661 perf_pmu_resched(x86_get_pmu());
4664 static ssize_t show_sysctl_tfa(struct device *cdev,
4665 struct device_attribute *attr,
4668 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
4671 static ssize_t set_sysctl_tfa(struct device *cdev,
4672 struct device_attribute *attr,
4673 const char *buf, size_t count)
4678 ret = kstrtobool(buf, &val);
4683 if (val == allow_tsx_force_abort)
4686 allow_tsx_force_abort = val;
4689 on_each_cpu(update_tfa_sched, NULL, 1);
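/*
 * allow_tsx_force_abort is exposed the same way (see the DEVICE_ATTR()
 * definition below); for example (illustrative):
 *
 *	echo 0 > /sys/bus/event_source/devices/cpu/allow_tsx_force_abort
 *
 * With the value 0 the scheduler keeps events off PMC3, so TSX
 * transactions are not force-aborted; update_tfa_sched() above then
 * forces a reschedule on any CPU where PMC3 is currently active so the
 * new policy takes effect immediately.
 */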
4696 static DEVICE_ATTR_RW(freeze_on_smi);
4698 static ssize_t branches_show(struct device *cdev,
4699 struct device_attribute *attr,
4702 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4705 static DEVICE_ATTR_RO(branches);
4707 static struct attribute *lbr_attrs[] = {
4708 &dev_attr_branches.attr,
4712 static char pmu_name_str[30];
4714 static ssize_t pmu_name_show(struct device *cdev,
4715 struct device_attribute *attr,
4718 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4721 static DEVICE_ATTR_RO(pmu_name);
4723 static struct attribute *intel_pmu_caps_attrs[] = {
4724 &dev_attr_pmu_name.attr,
4728 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
4732 static struct attribute *intel_pmu_attrs[] = {
4733 &dev_attr_freeze_on_smi.attr,
4734 &dev_attr_allow_tsx_force_abort.attr,
4739 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4741 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
4745 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4747 return x86_pmu.pebs ? attr->mode : 0;
4751 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4753 return x86_pmu.lbr_nr ? attr->mode : 0;
4757 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4759 return x86_pmu.version >= 2 ? attr->mode : 0;
4763 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4765 if (attr == &dev_attr_allow_tsx_force_abort.attr)
4766 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
4771 static struct attribute_group group_events_td = {
4775 static struct attribute_group group_events_mem = {
4777 .is_visible = pebs_is_visible,
4780 static struct attribute_group group_events_tsx = {
4782 .is_visible = tsx_is_visible,
4785 static struct attribute_group group_caps_gen = {
4787 .attrs = intel_pmu_caps_attrs,
4790 static struct attribute_group group_caps_lbr = {
4793 .is_visible = lbr_is_visible,
4796 static struct attribute_group group_format_extra = {
4798 .is_visible = exra_is_visible,
4801 static struct attribute_group group_format_extra_skl = {
4803 .is_visible = exra_is_visible,
4806 static struct attribute_group group_default = {
4807 .attrs = intel_pmu_attrs,
4808 .is_visible = default_is_visible,
4811 static const struct attribute_group *attr_update[] = {
4817 &group_format_extra,
4818 &group_format_extra_skl,
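/*
 * The groups in attr_update[] start out with empty ->attrs; the
 * per-model switch in intel_pmu_init() fills them in (e.g.
 * group_events_td.attrs = td_attr), and each ->is_visible() callback
 * above then decides, per attribute, whether it shows up in sysfs on
 * this CPU.
 */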
4823 static struct attribute *empty_attrs;
4825 __init int intel_pmu_init(void)
4827 struct attribute **extra_skl_attr = &empty_attrs;
4828 struct attribute **extra_attr = &empty_attrs;
4829 struct attribute **td_attr = &empty_attrs;
4830 struct attribute **mem_attr = &empty_attrs;
4831 struct attribute **tsx_attr = &empty_attrs;
4832 union cpuid10_edx edx;
4833 union cpuid10_eax eax;
4834 union cpuid10_ebx ebx;
4835 struct event_constraint *c;
4836 unsigned int unused;
4837 struct extra_reg *er;
4842 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4843 switch (boot_cpu_data.x86) {
4845 return p6_pmu_init();
4847 return knc_pmu_init();
4849 return p4_pmu_init();
4855 * Check whether the Architectural PerfMon supports
4856 * Branch Misses Retired hw_event or not.
4858 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4859 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4862 version = eax.split.version_id;
4866 x86_pmu = intel_pmu;
4868 x86_pmu.version = version;
4869 x86_pmu.num_counters = eax.split.num_counters;
4870 x86_pmu.cntval_bits = eax.split.bit_width;
4871 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4873 x86_pmu.events_maskl = ebx.full;
4874 x86_pmu.events_mask_len = eax.split.mask_length;
4876 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4879 * Quirk: v2 perfmon does not report fixed-purpose events, so
4880 * assume at least 3 events, when not running in a hypervisor:
4883 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4885 x86_pmu.num_counters_fixed =
4886 max((int)edx.split.num_counters_fixed, assume);
4890 x86_pmu.counter_freezing = !disable_counter_freezing;
4892 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4895 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4896 x86_pmu.intel_cap.capabilities = capabilities;
4899 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
4900 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
4901 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
4904 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
4905 intel_pmu_arch_lbr_init();
4909 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4912 * Install the hw-cache-events table:
4914 switch (boot_cpu_data.x86_model) {
4915 case INTEL_FAM6_CORE_YONAH:
4916 pr_cont("Core events, ");
4920 case INTEL_FAM6_CORE2_MEROM:
4921 x86_add_quirk(intel_clovertown_quirk);
4924 case INTEL_FAM6_CORE2_MEROM_L:
4925 case INTEL_FAM6_CORE2_PENRYN:
4926 case INTEL_FAM6_CORE2_DUNNINGTON:
4927 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4928 sizeof(hw_cache_event_ids));
4930 intel_pmu_lbr_init_core();
4932 x86_pmu.event_constraints = intel_core2_event_constraints;
4933 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4934 pr_cont("Core2 events, ");
4938 case INTEL_FAM6_NEHALEM:
4939 case INTEL_FAM6_NEHALEM_EP:
4940 case INTEL_FAM6_NEHALEM_EX:
4941 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4942 sizeof(hw_cache_event_ids));
4943 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4944 sizeof(hw_cache_extra_regs));
4946 intel_pmu_lbr_init_nhm();
4948 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4949 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4950 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4951 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4952 x86_pmu.limit_period = nhm_limit_period;
4954 mem_attr = nhm_mem_events_attrs;
4956 /* UOPS_ISSUED.STALLED_CYCLES */
4957 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4958 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4959 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4960 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4961 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
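/*
 * For reference (illustrative arithmetic): X86_CONFIG() just assembles
 * the raw event-select value, so the backend-stall encoding above,
 * .event=0xb1, .umask=0x3f, .inv=1, .cmask=1, works out to
 * 0x01800000 | 0x3f00 | 0xb1 = 0x1803fb1 (cmask in bits 31:24, inv in
 * bit 23), i.e. roughly what "perf stat -e r1803fb1" would request as
 * a raw event.
 */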
4963 intel_pmu_pebs_data_source_nhm();
4964 x86_add_quirk(intel_nehalem_quirk);
4965 x86_pmu.pebs_no_tlb = 1;
4966 extra_attr = nhm_format_attr;
4968 pr_cont("Nehalem events, ");
4972 case INTEL_FAM6_ATOM_BONNELL:
4973 case INTEL_FAM6_ATOM_BONNELL_MID:
4974 case INTEL_FAM6_ATOM_SALTWELL:
4975 case INTEL_FAM6_ATOM_SALTWELL_MID:
4976 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4977 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4978 sizeof(hw_cache_event_ids));
4980 intel_pmu_lbr_init_atom();
4982 x86_pmu.event_constraints = intel_gen_event_constraints;
4983 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4984 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4985 pr_cont("Atom events, ");
4989 case INTEL_FAM6_ATOM_SILVERMONT:
4990 case INTEL_FAM6_ATOM_SILVERMONT_D:
4991 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4992 case INTEL_FAM6_ATOM_AIRMONT:
4993 case INTEL_FAM6_ATOM_AIRMONT_MID:
4994 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4995 sizeof(hw_cache_event_ids));
4996 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4997 sizeof(hw_cache_extra_regs));
4999 intel_pmu_lbr_init_slm();
5001 x86_pmu.event_constraints = intel_slm_event_constraints;
5002 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5003 x86_pmu.extra_regs = intel_slm_extra_regs;
5004 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5005 td_attr = slm_events_attrs;
5006 extra_attr = slm_format_attr;
5007 pr_cont("Silvermont events, ");
5008 name = "silvermont";
5011 case INTEL_FAM6_ATOM_GOLDMONT:
5012 case INTEL_FAM6_ATOM_GOLDMONT_D:
5013 x86_add_quirk(intel_counter_freezing_quirk);
5014 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
5015 sizeof(hw_cache_event_ids));
5016 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
5017 sizeof(hw_cache_extra_regs));
5019 intel_pmu_lbr_init_skl();
5021 x86_pmu.event_constraints = intel_slm_event_constraints;
5022 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
5023 x86_pmu.extra_regs = intel_glm_extra_regs;
5025 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5026 * for precise cycles.
5027 * :pp is identical to :ppp
5029 x86_pmu.pebs_aliases = NULL;
5030 x86_pmu.pebs_prec_dist = true;
5031 x86_pmu.lbr_pt_coexist = true;
5032 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5033 td_attr = glm_events_attrs;
5034 extra_attr = slm_format_attr;
5035 pr_cont("Goldmont events, ");
5039 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
5040 x86_add_quirk(intel_counter_freezing_quirk);
5041 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5042 sizeof(hw_cache_event_ids));
5043 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
5044 sizeof(hw_cache_extra_regs));
5046 intel_pmu_lbr_init_skl();
5048 x86_pmu.event_constraints = intel_slm_event_constraints;
5049 x86_pmu.extra_regs = intel_glm_extra_regs;
5051 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5052 * for precise cycles.
5054 x86_pmu.pebs_aliases = NULL;
5055 x86_pmu.pebs_prec_dist = true;
5056 x86_pmu.lbr_pt_coexist = true;
5057 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5058 x86_pmu.flags |= PMU_FL_PEBS_ALL;
5059 x86_pmu.get_event_constraints = glp_get_event_constraints;
5060 td_attr = glm_events_attrs;
5061 /* Goldmont Plus has a 4-wide pipeline */
5062 event_attr_td_total_slots_scale_glm.event_str = "4";
5063 extra_attr = slm_format_attr;
5064 pr_cont("Goldmont plus events, ");
5065 name = "goldmont_plus";
5068 case INTEL_FAM6_ATOM_TREMONT_D:
5069 case INTEL_FAM6_ATOM_TREMONT:
5070 x86_pmu.late_ack = true;
5071 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5072 sizeof(hw_cache_event_ids));
5073 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
5074 sizeof(hw_cache_extra_regs));
5075 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
5077 intel_pmu_lbr_init_skl();
5079 x86_pmu.event_constraints = intel_slm_event_constraints;
5080 x86_pmu.extra_regs = intel_tnt_extra_regs;
5082 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5083 * for precise cycles.
5085 x86_pmu.pebs_aliases = NULL;
5086 x86_pmu.pebs_prec_dist = true;
5087 x86_pmu.lbr_pt_coexist = true;
5088 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5089 x86_pmu.get_event_constraints = tnt_get_event_constraints;
5090 extra_attr = slm_format_attr;
5091 pr_cont("Tremont events, ");
5095 case INTEL_FAM6_WESTMERE:
5096 case INTEL_FAM6_WESTMERE_EP:
5097 case INTEL_FAM6_WESTMERE_EX:
5098 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
5099 sizeof(hw_cache_event_ids));
5100 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5101 sizeof(hw_cache_extra_regs));
5103 intel_pmu_lbr_init_nhm();
5105 x86_pmu.event_constraints = intel_westmere_event_constraints;
5106 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5107 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
5108 x86_pmu.extra_regs = intel_westmere_extra_regs;
5109 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5111 mem_attr = nhm_mem_events_attrs;
5113 /* UOPS_ISSUED.STALLED_CYCLES */
5114 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5115 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5116 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5117 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5118 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
5120 intel_pmu_pebs_data_source_nhm();
5121 extra_attr = nhm_format_attr;
5122 pr_cont("Westmere events, ");
5126 case INTEL_FAM6_SANDYBRIDGE:
5127 case INTEL_FAM6_SANDYBRIDGE_X:
5128 x86_add_quirk(intel_sandybridge_quirk);
5129 x86_add_quirk(intel_ht_bug);
5130 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
5131 sizeof(hw_cache_event_ids));
5132 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
5133 sizeof(hw_cache_extra_regs));
5135 intel_pmu_lbr_init_snb();
5137 x86_pmu.event_constraints = intel_snb_event_constraints;
5138 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
5139 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
5140 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
5141 x86_pmu.extra_regs = intel_snbep_extra_regs;
5143 x86_pmu.extra_regs = intel_snb_extra_regs;
5146 /* all extra regs are per-cpu when HT is on */
5147 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5148 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5150 td_attr = snb_events_attrs;
5151 mem_attr = snb_mem_events_attrs;
5153 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
5154 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5155 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5156 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
5157 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5158 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
5160 extra_attr = nhm_format_attr;
5162 pr_cont("SandyBridge events, ");
5163 name = "sandybridge";
5166 case INTEL_FAM6_IVYBRIDGE:
5167 case INTEL_FAM6_IVYBRIDGE_X:
5168 x86_add_quirk(intel_ht_bug);
5169 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
5170 sizeof(hw_cache_event_ids));
5171 /* dTLB-load-misses on IVB is different than SNB */
5172 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
5174 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
5175 sizeof(hw_cache_extra_regs));
5177 intel_pmu_lbr_init_snb();
5179 x86_pmu.event_constraints = intel_ivb_event_constraints;
5180 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
5181 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5182 x86_pmu.pebs_prec_dist = true;
5183 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
5184 x86_pmu.extra_regs = intel_snbep_extra_regs;
5186 x86_pmu.extra_regs = intel_snb_extra_regs;
5187 /* all extra regs are per-cpu when HT is on */
5188 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5189 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5191 td_attr = snb_events_attrs;
5192 mem_attr = snb_mem_events_attrs;
5194 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
5195 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5196 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5198 extra_attr = nhm_format_attr;
5200 pr_cont("IvyBridge events, ");
5205 case INTEL_FAM6_HASWELL:
5206 case INTEL_FAM6_HASWELL_X:
5207 case INTEL_FAM6_HASWELL_L:
5208 case INTEL_FAM6_HASWELL_G:
5209 x86_add_quirk(intel_ht_bug);
5210 x86_add_quirk(intel_pebs_isolation_quirk);
5211 x86_pmu.late_ack = true;
5212 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5213 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5215 intel_pmu_lbr_init_hsw();
5217 x86_pmu.event_constraints = intel_hsw_event_constraints;
5218 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
5219 x86_pmu.extra_regs = intel_snbep_extra_regs;
5220 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5221 x86_pmu.pebs_prec_dist = true;
5222 /* all extra regs are per-cpu when HT is on */
5223 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5224 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5226 x86_pmu.hw_config = hsw_hw_config;
5227 x86_pmu.get_event_constraints = hsw_get_event_constraints;
5228 x86_pmu.lbr_double_abort = true;
5229 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5230 hsw_format_attr : nhm_format_attr;
5231 td_attr = hsw_events_attrs;
5232 mem_attr = hsw_mem_events_attrs;
5233 tsx_attr = hsw_tsx_events_attrs;
5234 pr_cont("Haswell events, ");
5238 case INTEL_FAM6_BROADWELL:
5239 case INTEL_FAM6_BROADWELL_D:
5240 case INTEL_FAM6_BROADWELL_G:
5241 case INTEL_FAM6_BROADWELL_X:
5242 x86_add_quirk(intel_pebs_isolation_quirk);
5243 x86_pmu.late_ack = true;
5244 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5245 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5247 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
5248 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
5249 BDW_L3_MISS|HSW_SNOOP_DRAM;
5250 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
5252 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
5253 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5254 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
5255 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5257 intel_pmu_lbr_init_hsw();
5259 x86_pmu.event_constraints = intel_bdw_event_constraints;
5260 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
5261 x86_pmu.extra_regs = intel_snbep_extra_regs;
5262 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5263 x86_pmu.pebs_prec_dist = true;
5264 /* all extra regs are per-cpu when HT is on */
5265 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5266 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5268 x86_pmu.hw_config = hsw_hw_config;
5269 x86_pmu.get_event_constraints = hsw_get_event_constraints;
5270 x86_pmu.limit_period = bdw_limit_period;
5271 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5272 hsw_format_attr : nhm_format_attr;
5273 td_attr = hsw_events_attrs;
5274 mem_attr = hsw_mem_events_attrs;
5275 tsx_attr = hsw_tsx_events_attrs;
5276 pr_cont("Broadwell events, ");
5280 case INTEL_FAM6_XEON_PHI_KNL:
5281 case INTEL_FAM6_XEON_PHI_KNM:
5282 memcpy(hw_cache_event_ids,
5283 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5284 memcpy(hw_cache_extra_regs,
5285 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5286 intel_pmu_lbr_init_knl();
5288 x86_pmu.event_constraints = intel_slm_event_constraints;
5289 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5290 x86_pmu.extra_regs = intel_knl_extra_regs;
5292 /* all extra regs are per-cpu when HT is on */
5293 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5294 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5295 extra_attr = slm_format_attr;
5296 pr_cont("Knights Landing/Mill events, ");
5297 name = "knights-landing";
5300 case INTEL_FAM6_SKYLAKE_X:
5303 case INTEL_FAM6_SKYLAKE_L:
5304 case INTEL_FAM6_SKYLAKE:
5305 case INTEL_FAM6_KABYLAKE_L:
5306 case INTEL_FAM6_KABYLAKE:
5307 case INTEL_FAM6_COMETLAKE_L:
5308 case INTEL_FAM6_COMETLAKE:
5309 x86_add_quirk(intel_pebs_isolation_quirk);
5310 x86_pmu.late_ack = true;
5311 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5312 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5313 intel_pmu_lbr_init_skl();
5315 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
5316 event_attr_td_recovery_bubbles.event_str_noht =
5317 "event=0xd,umask=0x1,cmask=1";
5318 event_attr_td_recovery_bubbles.event_str_ht =
5319 "event=0xd,umask=0x1,cmask=1,any=1";
5321 x86_pmu.event_constraints = intel_skl_event_constraints;
5322 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
5323 x86_pmu.extra_regs = intel_skl_extra_regs;
5324 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
5325 x86_pmu.pebs_prec_dist = true;
5326 /* all extra regs are per-cpu when HT is on */
5327 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5328 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5330 x86_pmu.hw_config = hsw_hw_config;
5331 x86_pmu.get_event_constraints = hsw_get_event_constraints;
5332 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5333 hsw_format_attr : nhm_format_attr;
5334 extra_skl_attr = skl_format_attr;
5335 td_attr = hsw_events_attrs;
5336 mem_attr = hsw_mem_events_attrs;
5337 tsx_attr = hsw_tsx_events_attrs;
5338 intel_pmu_pebs_data_source_skl(pmem);
5340 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
5341 x86_pmu.flags |= PMU_FL_TFA;
5342 x86_pmu.get_event_constraints = tfa_get_event_constraints;
5343 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
5344 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
5347 pr_cont("Skylake events, ");
5351 case INTEL_FAM6_ICELAKE_X:
5352 case INTEL_FAM6_ICELAKE_D:
5355 case INTEL_FAM6_ICELAKE_L:
5356 case INTEL_FAM6_ICELAKE:
5357 case INTEL_FAM6_TIGERLAKE_L:
5358 case INTEL_FAM6_TIGERLAKE:
5359 x86_pmu.late_ack = true;
5360 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5361 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5362 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
5363 intel_pmu_lbr_init_skl();
5365 x86_pmu.event_constraints = intel_icl_event_constraints;
5366 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
5367 x86_pmu.extra_regs = intel_icl_extra_regs;
5368 x86_pmu.pebs_aliases = NULL;
5369 x86_pmu.pebs_prec_dist = true;
5370 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5371 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5373 x86_pmu.hw_config = hsw_hw_config;
5374 x86_pmu.get_event_constraints = icl_get_event_constraints;
5375 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5376 hsw_format_attr : nhm_format_attr;
5377 extra_skl_attr = skl_format_attr;
5378 mem_attr = icl_events_attrs;
5379 td_attr = icl_td_events_attrs;
5380 tsx_attr = icl_tsx_events_attrs;
5381 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
5382 x86_pmu.lbr_pt_coexist = true;
5383 intel_pmu_pebs_data_source_skl(pmem);
5384 x86_pmu.update_topdown_event = icl_update_topdown_event;
5385 x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
5386 pr_cont("Icelake events, ");
5391 switch (x86_pmu.version) {
5393 x86_pmu.event_constraints = intel_v1_event_constraints;
5394 pr_cont("generic architected perfmon v1, ");
5395 name = "generic_arch_v1";
5399 * default constraints for v2 and up
5401 x86_pmu.event_constraints = intel_gen_event_constraints;
5402 pr_cont("generic architected perfmon, ");
5403 name = "generic_arch_v2+";
5408 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
5411 group_events_td.attrs = td_attr;
5412 group_events_mem.attrs = mem_attr;
5413 group_events_tsx.attrs = tsx_attr;
5414 group_format_extra.attrs = extra_attr;
5415 group_format_extra_skl.attrs = extra_skl_attr;
5417 x86_pmu.attr_update = attr_update;
5419 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
5420 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5421 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
5422 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
5424 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
5426 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5427 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5428 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
5429 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
5432 x86_pmu.intel_ctrl |=
5433 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
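/*
 * Worked example with illustrative numbers: 4 general-purpose plus 3
 * fixed counters yield intel_ctrl = 0xf | (0x7 << 32) = 0x70000000f,
 * i.e. the MSR_CORE_PERF_GLOBAL_CTRL enable bits for every counter the
 * PMU actually has.
 */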
5435 if (x86_pmu.event_constraints) {
5437 * event on fixed counter2 (REF_CYCLES) only works on this
5438 * counter, so do not extend mask to generic counters
5440 for_each_event_constraint(c, x86_pmu.event_constraints) {
5442 * Don't extend the topdown slots and metrics
5443 * events to the generic counters.
5445 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
5446 c->weight = hweight64(c->idxmsk64);
5450 if (c->cmask == FIXED_EVENT_FLAGS
5451 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
5452 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
5455 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
5456 c->weight = hweight64(c->idxmsk64);
5461 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
5462 * e.g. KVM doesn't support the LBR MSRs.
5463 * Check all LBR MSRs here.
5464 * Disable LBR access if any LBR MSR cannot be accessed.
5466 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
5468 for (i = 0; i < x86_pmu.lbr_nr; i++) {
5469 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
5470 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
5475 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
5478 * Accessing extra MSRs may cause a #GP under certain circumstances,
5479 * e.g. KVM doesn't support offcore events.
5480 * Check all extra_regs here.
5482 if (x86_pmu.extra_regs) {
5483 for (er = x86_pmu.extra_regs; er->msr; er++) {
5484 er->extra_msr_access = check_msr(er->msr, 0x11UL);
5485 /* Disable LBR select mapping */
5486 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5487 x86_pmu.lbr_sel_map = NULL;
5491 /* Support full width counters using alternative MSR range */
5492 if (x86_pmu.intel_cap.full_width_write) {
5493 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
5494 x86_pmu.perfctr = MSR_IA32_PMC0;
5495 pr_cont("full-width counters, ");
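/*
 * For example, on a PMU with 48-bit counters (cntval_bits = 48) this
 * raises max_period from the artificial 2^31 - 1 to 2^47 - 1 and
 * redirects counter writes to the MSR_IA32_PMC0 (0x4c1) alias range,
 * which accepts the full counter width, instead of the legacy
 * MSR_ARCH_PERFMON_PERFCTR0 (0xc1) range where writes are limited to a
 * sign-extended 32 bits.
 */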
5499 * For arch perfmon v4, use counter freezing to avoid
5500 * several MSR accesses in the PMI.
5502 if (x86_pmu.counter_freezing)
5503 x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
5505 if (x86_pmu.intel_cap.perf_metrics)
5506 x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5512 * HT bug: phase 2 init
5513 * Called once we have valid topology information to check
5514 * whether or not HT is enabled.
5515 * If HT is off, then we disable the workaround.
5517 static __init int fixup_ht_bug(void)
5521 * problem not present on this CPU model, nothing to do
5523 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
5526 if (topology_max_smt_threads() > 1) {
5527 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
5533 hardlockup_detector_perf_stop();
5535 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
5537 x86_pmu.start_scheduling = NULL;
5538 x86_pmu.commit_scheduling = NULL;
5539 x86_pmu.stop_scheduling = NULL;
5541 hardlockup_detector_perf_restart();
5543 for_each_online_cpu(c)
5544 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
5547 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
5550 subsys_initcall(fixup_ht_bug)