1 // SPDX-License-Identifier: GPL-2.0-only
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
18 #include <asm/cpufeature.h>
19 #include <asm/hardirq.h>
20 #include <asm/intel-family.h>
21 #include <asm/intel_pt.h>
23 #include <asm/cpu_device_id.h>
25 #include "../perf_event.h"
28 * Intel PerfMon, used on Core and later.
30 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
32 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
33 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
34 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
35 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
36 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
37 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
38 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
39 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
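/*
 * Illustrative note (not part of the table): each entry is a raw event
 * encoding with the event select in bits 7:0 and the unit mask in bits
 * 15:8, e.g. the cache-misses entry 0x412e is event 0x2e with umask 0x41.
 * 0x0300 is a pseudo-encoding that only exists on a fixed counter. A
 * minimal decode sketch (hypothetical helper, unused by the driver):
 */
static inline void intel_example_decode_event(u64 code, u8 *evtsel, u8 *umask)
{
	*evtsel = code & 0xff;		/* bits 7:0: event select */
	*umask  = (code >> 8) & 0xff;	/* bits 15:8: unit mask */
}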
42 static struct event_constraint intel_core_event_constraints[] __read_mostly =
44 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
45 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
46 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
47 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
48 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
49 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
53 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
55 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
56 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
57 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
58 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
59 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
60 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
61 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
62 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
63 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
64 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
65 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
66 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
67 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
71 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
73 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
74 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
75 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
76 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
77 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
78 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
79 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
80 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
81 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
82 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
83 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
87 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
89 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
90 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
91 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
95 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
97 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
98 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
99 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
100 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
101 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
102 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
103 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
107 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
109 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
110 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
111 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
112 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
113 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
114 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
116 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
117 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
118 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
119 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
120 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
123 * When HT is off these events can only run on the bottom 4 counters
124 * When HT is on, they are impacted by the HT bug and require EXCL access
126 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
127 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
128 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
134 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
136 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
137 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
138 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
139 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
140 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
141 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
142 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
144 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
151 * When HT is off these events can only run on the bottom 4 counters
152 * When HT is on, they are impacted by the HT bug and require EXCL access
154 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
155 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
156 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
162 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
164 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
165 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
166 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
167 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
171 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
176 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
178 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
179 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
180 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
184 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
186 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
187 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
188 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
192 static struct event_constraint intel_skl_event_constraints[] = {
193 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
194 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
195 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
196 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
199 * when HT is off, these can only run on the bottom 4 counters
201 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
203 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
204 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
205 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
210 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
211 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
212 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
216 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
217 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
218 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
219 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
220 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
224 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
225 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
226 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
227 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
228 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
232 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
234 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
235 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
237 * Note: the low 8 bits of the eventsel code do not form a contiguous
238 * field; they contain some bits that #GP when set, which are masked out.
240 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
244 static struct event_constraint intel_icl_event_constraints[] = {
245 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
246 INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
247 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
248 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
249 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
250 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
251 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
252 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
253 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
254 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
255 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
256 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
257 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
258 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
259 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
260 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
261 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
265 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
266 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
267 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
268 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
269 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
273 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
274 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
275 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
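/*
 * Illustrative note (not part of the driver): ldlat=3 selects the PEBS
 * load-latency facility's minimum latency threshold in core cycles; only
 * loads taking at least that long are sampled. It is backed by the
 * PEBS_LDLAT extra register entries defined in the extra_regs tables.
 */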
277 static struct attribute *nhm_mem_events_attrs[] = {
278 EVENT_PTR(mem_ld_nhm),
283 * topdown events for Intel Core CPUs.
285 * The events are all counted in slots; a slot is a free issue opportunity
286 * in the 4-wide pipeline. Some events are already reported in slots; for
287 * cycle events we multiply by the pipeline width (4).
289 * With Hyper Threading on, topdown metrics are either summed or averaged
290 * between the threads of a core: (count_t0 + count_t1).
292 * For the averaged case the metric is still scaled to the pipeline width,
293 * so we use a factor of 2: ((count_t0 + count_t1) / 2) * 4.
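/*
 * Illustrative note (not part of the driver): user space typically combines
 * the attributes below into the level-1 topdown metrics, e.g.
 *
 *	frontend_bound	= topdown-fetch-bubbles / topdown-total-slots
 *	bad_speculation	= (topdown-slots-issued - topdown-slots-retired +
 *			   topdown-recovery-bubbles) / topdown-total-slots
 *	retiring	= topdown-slots-retired / topdown-total-slots
 *	backend_bound	= 1 - frontend_bound - bad_speculation - retiring
 *
 * with each count scaled by its .scale attribute where one is defined.
 */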
296 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
297 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
298 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
299 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
300 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
301 "event=0xe,umask=0x1"); /* uops_issued.any */
302 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
303 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
304 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
305 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
306 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
307 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
308 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
309 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
312 static struct attribute *snb_events_attrs[] = {
313 EVENT_PTR(td_slots_issued),
314 EVENT_PTR(td_slots_retired),
315 EVENT_PTR(td_fetch_bubbles),
316 EVENT_PTR(td_total_slots),
317 EVENT_PTR(td_total_slots_scale),
318 EVENT_PTR(td_recovery_bubbles),
319 EVENT_PTR(td_recovery_bubbles_scale),
323 static struct attribute *snb_mem_events_attrs[] = {
324 EVENT_PTR(mem_ld_snb),
325 EVENT_PTR(mem_st_snb),
329 static struct event_constraint intel_hsw_event_constraints[] = {
330 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
331 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
332 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
333 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
334 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
335 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
336 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
337 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
338 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
339 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
340 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
341 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
344 * When HT is off these events can only run on the bottom 4 counters
345 * When HT is on, they are impacted by the HT bug and require EXCL access
347 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
348 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
349 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
350 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
355 static struct event_constraint intel_bdw_event_constraints[] = {
356 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
357 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
358 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
359 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
360 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
362 * when HT is off, these can only run on the bottom 4 counters
364 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
365 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
366 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
367 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
371 static u64 intel_pmu_event_map(int hw_event)
373 return intel_perfmon_event_map[hw_event];
377 * Notes on the events:
378 * - data reads do not include code reads (comparable to earlier tables)
379 * - data counts include speculative execution (except L1 write, dtlb, bpu)
380 * - remote node access includes remote memory, remote cache, remote mmio.
381 * - prefetches are not included in the counts.
382 * - icache miss does not include decoded icache
385 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
386 #define SKL_DEMAND_RFO BIT_ULL(1)
387 #define SKL_ANY_RESPONSE BIT_ULL(16)
388 #define SKL_SUPPLIER_NONE BIT_ULL(17)
389 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
390 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
391 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
392 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
393 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
394 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
395 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
396 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
397 #define SKL_SPL_HIT BIT_ULL(30)
398 #define SKL_SNOOP_NONE BIT_ULL(31)
399 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
400 #define SKL_SNOOP_MISS BIT_ULL(33)
401 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
402 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
403 #define SKL_SNOOP_HITM BIT_ULL(36)
404 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
405 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
406 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
407 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
408 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
409 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
410 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
411 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
412 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
413 SKL_SNOOP_HITM|SKL_SPL_HIT)
414 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
415 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
416 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
417 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
418 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
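/*
 * Illustrative sketch (hypothetical name, unused by the driver): the 0x01b7
 * OFFCORE_RESPONSE entries in the tables below only select the event; the
 * request/supplier/snoop bits above are what actually get programmed into
 * MSR_OFFCORE_RSP_x via the extra_reg machinery. For example, the
 * local-DRAM demand-read value used in skl_hw_cache_extra_regs would be:
 */
static const u64 skl_example_offcore_local_read __maybe_unused =
	SKL_DEMAND_READ | SKL_L3_MISS_LOCAL_DRAM | SKL_SNOOP_DRAM;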
420 static __initconst const u64 skl_hw_cache_event_ids
421 [PERF_COUNT_HW_CACHE_MAX]
422 [PERF_COUNT_HW_CACHE_OP_MAX]
423 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
427 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
428 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
431 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
432 [ C(RESULT_MISS) ] = 0x0,
434 [ C(OP_PREFETCH) ] = {
435 [ C(RESULT_ACCESS) ] = 0x0,
436 [ C(RESULT_MISS) ] = 0x0,
441 [ C(RESULT_ACCESS) ] = 0x0,
442 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
445 [ C(RESULT_ACCESS) ] = -1,
446 [ C(RESULT_MISS) ] = -1,
448 [ C(OP_PREFETCH) ] = {
449 [ C(RESULT_ACCESS) ] = 0x0,
450 [ C(RESULT_MISS) ] = 0x0,
455 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
456 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
459 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
460 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
462 [ C(OP_PREFETCH) ] = {
463 [ C(RESULT_ACCESS) ] = 0x0,
464 [ C(RESULT_MISS) ] = 0x0,
469 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
470 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
473 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
474 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
476 [ C(OP_PREFETCH) ] = {
477 [ C(RESULT_ACCESS) ] = 0x0,
478 [ C(RESULT_MISS) ] = 0x0,
483 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
484 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
487 [ C(RESULT_ACCESS) ] = -1,
488 [ C(RESULT_MISS) ] = -1,
490 [ C(OP_PREFETCH) ] = {
491 [ C(RESULT_ACCESS) ] = -1,
492 [ C(RESULT_MISS) ] = -1,
497 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
498 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
501 [ C(RESULT_ACCESS) ] = -1,
502 [ C(RESULT_MISS) ] = -1,
504 [ C(OP_PREFETCH) ] = {
505 [ C(RESULT_ACCESS) ] = -1,
506 [ C(RESULT_MISS) ] = -1,
511 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
512 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
515 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
516 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
518 [ C(OP_PREFETCH) ] = {
519 [ C(RESULT_ACCESS) ] = 0x0,
520 [ C(RESULT_MISS) ] = 0x0,
525 static __initconst const u64 skl_hw_cache_extra_regs
526 [PERF_COUNT_HW_CACHE_MAX]
527 [PERF_COUNT_HW_CACHE_OP_MAX]
528 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
532 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
533 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
534 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
535 SKL_L3_MISS|SKL_ANY_SNOOP|
539 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
540 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
541 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
542 SKL_L3_MISS|SKL_ANY_SNOOP|
545 [ C(OP_PREFETCH) ] = {
546 [ C(RESULT_ACCESS) ] = 0x0,
547 [ C(RESULT_MISS) ] = 0x0,
552 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
553 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
554 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
555 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
558 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
559 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
560 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
561 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
563 [ C(OP_PREFETCH) ] = {
564 [ C(RESULT_ACCESS) ] = 0x0,
565 [ C(RESULT_MISS) ] = 0x0,
570 #define SNB_DMND_DATA_RD (1ULL << 0)
571 #define SNB_DMND_RFO (1ULL << 1)
572 #define SNB_DMND_IFETCH (1ULL << 2)
573 #define SNB_DMND_WB (1ULL << 3)
574 #define SNB_PF_DATA_RD (1ULL << 4)
575 #define SNB_PF_RFO (1ULL << 5)
576 #define SNB_PF_IFETCH (1ULL << 6)
577 #define SNB_LLC_DATA_RD (1ULL << 7)
578 #define SNB_LLC_RFO (1ULL << 8)
579 #define SNB_LLC_IFETCH (1ULL << 9)
580 #define SNB_BUS_LOCKS (1ULL << 10)
581 #define SNB_STRM_ST (1ULL << 11)
582 #define SNB_OTHER (1ULL << 15)
583 #define SNB_RESP_ANY (1ULL << 16)
584 #define SNB_NO_SUPP (1ULL << 17)
585 #define SNB_LLC_HITM (1ULL << 18)
586 #define SNB_LLC_HITE (1ULL << 19)
587 #define SNB_LLC_HITS (1ULL << 20)
588 #define SNB_LLC_HITF (1ULL << 21)
589 #define SNB_LOCAL (1ULL << 22)
590 #define SNB_REMOTE (0xffULL << 23)
591 #define SNB_SNP_NONE (1ULL << 31)
592 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
593 #define SNB_SNP_MISS (1ULL << 33)
594 #define SNB_NO_FWD (1ULL << 34)
595 #define SNB_SNP_FWD (1ULL << 35)
596 #define SNB_HITM (1ULL << 36)
597 #define SNB_NON_DRAM (1ULL << 37)
599 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
600 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
601 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
603 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
604 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
607 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
608 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
610 #define SNB_L3_ACCESS SNB_RESP_ANY
611 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
613 static __initconst const u64 snb_hw_cache_extra_regs
614 [PERF_COUNT_HW_CACHE_MAX]
615 [PERF_COUNT_HW_CACHE_OP_MAX]
616 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
620 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
621 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
624 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
625 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
627 [ C(OP_PREFETCH) ] = {
628 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
629 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
634 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
635 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
638 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
639 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
641 [ C(OP_PREFETCH) ] = {
642 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
643 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
648 static __initconst const u64 snb_hw_cache_event_ids
649 [PERF_COUNT_HW_CACHE_MAX]
650 [PERF_COUNT_HW_CACHE_OP_MAX]
651 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
655 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
656 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
659 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
660 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
662 [ C(OP_PREFETCH) ] = {
663 [ C(RESULT_ACCESS) ] = 0x0,
664 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
669 [ C(RESULT_ACCESS) ] = 0x0,
670 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
673 [ C(RESULT_ACCESS) ] = -1,
674 [ C(RESULT_MISS) ] = -1,
676 [ C(OP_PREFETCH) ] = {
677 [ C(RESULT_ACCESS) ] = 0x0,
678 [ C(RESULT_MISS) ] = 0x0,
683 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
684 [ C(RESULT_ACCESS) ] = 0x01b7,
685 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
686 [ C(RESULT_MISS) ] = 0x01b7,
689 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
690 [ C(RESULT_ACCESS) ] = 0x01b7,
691 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
692 [ C(RESULT_MISS) ] = 0x01b7,
694 [ C(OP_PREFETCH) ] = {
695 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
696 [ C(RESULT_ACCESS) ] = 0x01b7,
697 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
698 [ C(RESULT_MISS) ] = 0x01b7,
703 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
704 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
707 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
708 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
710 [ C(OP_PREFETCH) ] = {
711 [ C(RESULT_ACCESS) ] = 0x0,
712 [ C(RESULT_MISS) ] = 0x0,
717 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
718 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
721 [ C(RESULT_ACCESS) ] = -1,
722 [ C(RESULT_MISS) ] = -1,
724 [ C(OP_PREFETCH) ] = {
725 [ C(RESULT_ACCESS) ] = -1,
726 [ C(RESULT_MISS) ] = -1,
731 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
732 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
735 [ C(RESULT_ACCESS) ] = -1,
736 [ C(RESULT_MISS) ] = -1,
738 [ C(OP_PREFETCH) ] = {
739 [ C(RESULT_ACCESS) ] = -1,
740 [ C(RESULT_MISS) ] = -1,
745 [ C(RESULT_ACCESS) ] = 0x01b7,
746 [ C(RESULT_MISS) ] = 0x01b7,
749 [ C(RESULT_ACCESS) ] = 0x01b7,
750 [ C(RESULT_MISS) ] = 0x01b7,
752 [ C(OP_PREFETCH) ] = {
753 [ C(RESULT_ACCESS) ] = 0x01b7,
754 [ C(RESULT_MISS) ] = 0x01b7,
761 * Notes on the events:
762 * - data reads do not include code reads (comparable to earlier tables)
763 * - data counts include speculative execution (except L1 write, dtlb, bpu)
764 * - remote node access includes remote memory, remote cache, remote mmio.
765 * - prefetches are not included in the counts because they are not
769 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
770 #define HSW_DEMAND_RFO BIT_ULL(1)
771 #define HSW_ANY_RESPONSE BIT_ULL(16)
772 #define HSW_SUPPLIER_NONE BIT_ULL(17)
773 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
774 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
775 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
776 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
777 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
778 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
779 HSW_L3_MISS_REMOTE_HOP2P)
780 #define HSW_SNOOP_NONE BIT_ULL(31)
781 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
782 #define HSW_SNOOP_MISS BIT_ULL(33)
783 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
784 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
785 #define HSW_SNOOP_HITM BIT_ULL(36)
786 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
787 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
788 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
789 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
790 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
791 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
792 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
793 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
794 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
795 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
796 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
798 #define BDW_L3_MISS_LOCAL BIT(26)
799 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
800 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
801 HSW_L3_MISS_REMOTE_HOP2P)
804 static __initconst const u64 hsw_hw_cache_event_ids
805 [PERF_COUNT_HW_CACHE_MAX]
806 [PERF_COUNT_HW_CACHE_OP_MAX]
807 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
811 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
812 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
815 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
816 [ C(RESULT_MISS) ] = 0x0,
818 [ C(OP_PREFETCH) ] = {
819 [ C(RESULT_ACCESS) ] = 0x0,
820 [ C(RESULT_MISS) ] = 0x0,
825 [ C(RESULT_ACCESS) ] = 0x0,
826 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
829 [ C(RESULT_ACCESS) ] = -1,
830 [ C(RESULT_MISS) ] = -1,
832 [ C(OP_PREFETCH) ] = {
833 [ C(RESULT_ACCESS) ] = 0x0,
834 [ C(RESULT_MISS) ] = 0x0,
839 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
840 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
843 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
844 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
846 [ C(OP_PREFETCH) ] = {
847 [ C(RESULT_ACCESS) ] = 0x0,
848 [ C(RESULT_MISS) ] = 0x0,
853 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
854 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
857 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
858 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
860 [ C(OP_PREFETCH) ] = {
861 [ C(RESULT_ACCESS) ] = 0x0,
862 [ C(RESULT_MISS) ] = 0x0,
867 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
868 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
871 [ C(RESULT_ACCESS) ] = -1,
872 [ C(RESULT_MISS) ] = -1,
874 [ C(OP_PREFETCH) ] = {
875 [ C(RESULT_ACCESS) ] = -1,
876 [ C(RESULT_MISS) ] = -1,
881 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
882 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
885 [ C(RESULT_ACCESS) ] = -1,
886 [ C(RESULT_MISS) ] = -1,
888 [ C(OP_PREFETCH) ] = {
889 [ C(RESULT_ACCESS) ] = -1,
890 [ C(RESULT_MISS) ] = -1,
895 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
896 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
899 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
900 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
902 [ C(OP_PREFETCH) ] = {
903 [ C(RESULT_ACCESS) ] = 0x0,
904 [ C(RESULT_MISS) ] = 0x0,
909 static __initconst const u64 hsw_hw_cache_extra_regs
910 [PERF_COUNT_HW_CACHE_MAX]
911 [PERF_COUNT_HW_CACHE_OP_MAX]
912 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
916 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
918 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
919 HSW_L3_MISS|HSW_ANY_SNOOP,
922 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
924 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
925 HSW_L3_MISS|HSW_ANY_SNOOP,
927 [ C(OP_PREFETCH) ] = {
928 [ C(RESULT_ACCESS) ] = 0x0,
929 [ C(RESULT_MISS) ] = 0x0,
934 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
935 HSW_L3_MISS_LOCAL_DRAM|
937 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
942 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
943 HSW_L3_MISS_LOCAL_DRAM|
945 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
949 [ C(OP_PREFETCH) ] = {
950 [ C(RESULT_ACCESS) ] = 0x0,
951 [ C(RESULT_MISS) ] = 0x0,
956 static __initconst const u64 westmere_hw_cache_event_ids
957 [PERF_COUNT_HW_CACHE_MAX]
958 [PERF_COUNT_HW_CACHE_OP_MAX]
959 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
963 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
964 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
967 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
968 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
970 [ C(OP_PREFETCH) ] = {
971 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
972 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
977 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
978 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
981 [ C(RESULT_ACCESS) ] = -1,
982 [ C(RESULT_MISS) ] = -1,
984 [ C(OP_PREFETCH) ] = {
985 [ C(RESULT_ACCESS) ] = 0x0,
986 [ C(RESULT_MISS) ] = 0x0,
991 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
992 [ C(RESULT_ACCESS) ] = 0x01b7,
993 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
994 [ C(RESULT_MISS) ] = 0x01b7,
997 * Use RFO, not WRITEBACK, because a write miss would typically occur
1001 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1002 [ C(RESULT_ACCESS) ] = 0x01b7,
1003 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1004 [ C(RESULT_MISS) ] = 0x01b7,
1006 [ C(OP_PREFETCH) ] = {
1007 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1008 [ C(RESULT_ACCESS) ] = 0x01b7,
1009 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1010 [ C(RESULT_MISS) ] = 0x01b7,
1015 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1016 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1019 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1020 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1022 [ C(OP_PREFETCH) ] = {
1023 [ C(RESULT_ACCESS) ] = 0x0,
1024 [ C(RESULT_MISS) ] = 0x0,
1029 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1030 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1033 [ C(RESULT_ACCESS) ] = -1,
1034 [ C(RESULT_MISS) ] = -1,
1036 [ C(OP_PREFETCH) ] = {
1037 [ C(RESULT_ACCESS) ] = -1,
1038 [ C(RESULT_MISS) ] = -1,
1043 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1044 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1047 [ C(RESULT_ACCESS) ] = -1,
1048 [ C(RESULT_MISS) ] = -1,
1050 [ C(OP_PREFETCH) ] = {
1051 [ C(RESULT_ACCESS) ] = -1,
1052 [ C(RESULT_MISS) ] = -1,
1057 [ C(RESULT_ACCESS) ] = 0x01b7,
1058 [ C(RESULT_MISS) ] = 0x01b7,
1061 [ C(RESULT_ACCESS) ] = 0x01b7,
1062 [ C(RESULT_MISS) ] = 0x01b7,
1064 [ C(OP_PREFETCH) ] = {
1065 [ C(RESULT_ACCESS) ] = 0x01b7,
1066 [ C(RESULT_MISS) ] = 0x01b7,
1072 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1073 * See IA32 SDM Vol 3B 30.6.1.3
1076 #define NHM_DMND_DATA_RD (1 << 0)
1077 #define NHM_DMND_RFO (1 << 1)
1078 #define NHM_DMND_IFETCH (1 << 2)
1079 #define NHM_DMND_WB (1 << 3)
1080 #define NHM_PF_DATA_RD (1 << 4)
1081 #define NHM_PF_DATA_RFO (1 << 5)
1082 #define NHM_PF_IFETCH (1 << 6)
1083 #define NHM_OFFCORE_OTHER (1 << 7)
1084 #define NHM_UNCORE_HIT (1 << 8)
1085 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1086 #define NHM_OTHER_CORE_HITM (1 << 10)
1088 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1089 #define NHM_REMOTE_DRAM (1 << 13)
1090 #define NHM_LOCAL_DRAM (1 << 14)
1091 #define NHM_NON_DRAM (1 << 15)
1093 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1094 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1096 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1097 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1098 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1100 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1101 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1102 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
1104 static __initconst const u64 nehalem_hw_cache_extra_regs
1105 [PERF_COUNT_HW_CACHE_MAX]
1106 [PERF_COUNT_HW_CACHE_OP_MAX]
1107 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1111 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1112 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1115 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1116 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1118 [ C(OP_PREFETCH) ] = {
1119 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1120 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1125 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1126 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1129 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1130 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1132 [ C(OP_PREFETCH) ] = {
1133 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1134 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1139 static __initconst const u64 nehalem_hw_cache_event_ids
1140 [PERF_COUNT_HW_CACHE_MAX]
1141 [PERF_COUNT_HW_CACHE_OP_MAX]
1142 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1146 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1147 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1150 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1151 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1153 [ C(OP_PREFETCH) ] = {
1154 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1155 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1160 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1161 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1164 [ C(RESULT_ACCESS) ] = -1,
1165 [ C(RESULT_MISS) ] = -1,
1167 [ C(OP_PREFETCH) ] = {
1168 [ C(RESULT_ACCESS) ] = 0x0,
1169 [ C(RESULT_MISS) ] = 0x0,
1174 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1175 [ C(RESULT_ACCESS) ] = 0x01b7,
1176 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1177 [ C(RESULT_MISS) ] = 0x01b7,
1180 * Use RFO, not WRITEBACK, because a write miss would typically occur
1184 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1185 [ C(RESULT_ACCESS) ] = 0x01b7,
1186 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1187 [ C(RESULT_MISS) ] = 0x01b7,
1189 [ C(OP_PREFETCH) ] = {
1190 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1191 [ C(RESULT_ACCESS) ] = 0x01b7,
1192 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1193 [ C(RESULT_MISS) ] = 0x01b7,
1198 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1199 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1202 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1203 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1205 [ C(OP_PREFETCH) ] = {
1206 [ C(RESULT_ACCESS) ] = 0x0,
1207 [ C(RESULT_MISS) ] = 0x0,
1212 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1213 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1216 [ C(RESULT_ACCESS) ] = -1,
1217 [ C(RESULT_MISS) ] = -1,
1219 [ C(OP_PREFETCH) ] = {
1220 [ C(RESULT_ACCESS) ] = -1,
1221 [ C(RESULT_MISS) ] = -1,
1226 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1227 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1230 [ C(RESULT_ACCESS) ] = -1,
1231 [ C(RESULT_MISS) ] = -1,
1233 [ C(OP_PREFETCH) ] = {
1234 [ C(RESULT_ACCESS) ] = -1,
1235 [ C(RESULT_MISS) ] = -1,
1240 [ C(RESULT_ACCESS) ] = 0x01b7,
1241 [ C(RESULT_MISS) ] = 0x01b7,
1244 [ C(RESULT_ACCESS) ] = 0x01b7,
1245 [ C(RESULT_MISS) ] = 0x01b7,
1247 [ C(OP_PREFETCH) ] = {
1248 [ C(RESULT_ACCESS) ] = 0x01b7,
1249 [ C(RESULT_MISS) ] = 0x01b7,
1254 static __initconst const u64 core2_hw_cache_event_ids
1255 [PERF_COUNT_HW_CACHE_MAX]
1256 [PERF_COUNT_HW_CACHE_OP_MAX]
1257 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1261 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1262 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1265 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1266 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1268 [ C(OP_PREFETCH) ] = {
1269 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1270 [ C(RESULT_MISS) ] = 0,
1275 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1276 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1279 [ C(RESULT_ACCESS) ] = -1,
1280 [ C(RESULT_MISS) ] = -1,
1282 [ C(OP_PREFETCH) ] = {
1283 [ C(RESULT_ACCESS) ] = 0,
1284 [ C(RESULT_MISS) ] = 0,
1289 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1290 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1293 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1294 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1296 [ C(OP_PREFETCH) ] = {
1297 [ C(RESULT_ACCESS) ] = 0,
1298 [ C(RESULT_MISS) ] = 0,
1303 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1304 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1307 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1308 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1310 [ C(OP_PREFETCH) ] = {
1311 [ C(RESULT_ACCESS) ] = 0,
1312 [ C(RESULT_MISS) ] = 0,
1317 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1318 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1321 [ C(RESULT_ACCESS) ] = -1,
1322 [ C(RESULT_MISS) ] = -1,
1324 [ C(OP_PREFETCH) ] = {
1325 [ C(RESULT_ACCESS) ] = -1,
1326 [ C(RESULT_MISS) ] = -1,
1331 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1332 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1335 [ C(RESULT_ACCESS) ] = -1,
1336 [ C(RESULT_MISS) ] = -1,
1338 [ C(OP_PREFETCH) ] = {
1339 [ C(RESULT_ACCESS) ] = -1,
1340 [ C(RESULT_MISS) ] = -1,
1345 static __initconst const u64 atom_hw_cache_event_ids
1346 [PERF_COUNT_HW_CACHE_MAX]
1347 [PERF_COUNT_HW_CACHE_OP_MAX]
1348 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1352 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1353 [ C(RESULT_MISS) ] = 0,
1356 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1357 [ C(RESULT_MISS) ] = 0,
1359 [ C(OP_PREFETCH) ] = {
1360 [ C(RESULT_ACCESS) ] = 0x0,
1361 [ C(RESULT_MISS) ] = 0,
1366 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1367 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1370 [ C(RESULT_ACCESS) ] = -1,
1371 [ C(RESULT_MISS) ] = -1,
1373 [ C(OP_PREFETCH) ] = {
1374 [ C(RESULT_ACCESS) ] = 0,
1375 [ C(RESULT_MISS) ] = 0,
1380 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1381 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1384 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1385 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1387 [ C(OP_PREFETCH) ] = {
1388 [ C(RESULT_ACCESS) ] = 0,
1389 [ C(RESULT_MISS) ] = 0,
1394 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1395 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1398 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1399 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1401 [ C(OP_PREFETCH) ] = {
1402 [ C(RESULT_ACCESS) ] = 0,
1403 [ C(RESULT_MISS) ] = 0,
1408 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1409 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1412 [ C(RESULT_ACCESS) ] = -1,
1413 [ C(RESULT_MISS) ] = -1,
1415 [ C(OP_PREFETCH) ] = {
1416 [ C(RESULT_ACCESS) ] = -1,
1417 [ C(RESULT_MISS) ] = -1,
1422 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1423 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1426 [ C(RESULT_ACCESS) ] = -1,
1427 [ C(RESULT_MISS) ] = -1,
1429 [ C(OP_PREFETCH) ] = {
1430 [ C(RESULT_ACCESS) ] = -1,
1431 [ C(RESULT_MISS) ] = -1,
1436 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1437 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
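/* Silvermont is a 2-wide machine, hence the pipeline-width scale of 2. */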
1438 /* no_alloc_cycles.not_delivered */
1439 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1440 "event=0xca,umask=0x50");
1441 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1442 /* uops_retired.all */
1443 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1444 "event=0xc2,umask=0x10");
1445 /* uops_retired.all */
1446 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1447 "event=0xc2,umask=0x10");
1449 static struct attribute *slm_events_attrs[] = {
1450 EVENT_PTR(td_total_slots_slm),
1451 EVENT_PTR(td_total_slots_scale_slm),
1452 EVENT_PTR(td_fetch_bubbles_slm),
1453 EVENT_PTR(td_fetch_bubbles_scale_slm),
1454 EVENT_PTR(td_slots_issued_slm),
1455 EVENT_PTR(td_slots_retired_slm),
1459 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1461 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1462 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1463 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1467 #define SLM_DMND_READ SNB_DMND_DATA_RD
1468 #define SLM_DMND_WRITE SNB_DMND_RFO
1469 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1471 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1472 #define SLM_LLC_ACCESS SNB_RESP_ANY
1473 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1475 static __initconst const u64 slm_hw_cache_extra_regs
1476 [PERF_COUNT_HW_CACHE_MAX]
1477 [PERF_COUNT_HW_CACHE_OP_MAX]
1478 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1482 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1483 [ C(RESULT_MISS) ] = 0,
1486 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1487 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1489 [ C(OP_PREFETCH) ] = {
1490 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1491 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1496 static __initconst const u64 slm_hw_cache_event_ids
1497 [PERF_COUNT_HW_CACHE_MAX]
1498 [PERF_COUNT_HW_CACHE_OP_MAX]
1499 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1503 [ C(RESULT_ACCESS) ] = 0,
1504 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1507 [ C(RESULT_ACCESS) ] = 0,
1508 [ C(RESULT_MISS) ] = 0,
1510 [ C(OP_PREFETCH) ] = {
1511 [ C(RESULT_ACCESS) ] = 0,
1512 [ C(RESULT_MISS) ] = 0,
1517 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1518 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1521 [ C(RESULT_ACCESS) ] = -1,
1522 [ C(RESULT_MISS) ] = -1,
1524 [ C(OP_PREFETCH) ] = {
1525 [ C(RESULT_ACCESS) ] = 0,
1526 [ C(RESULT_MISS) ] = 0,
1531 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1532 [ C(RESULT_ACCESS) ] = 0x01b7,
1533 [ C(RESULT_MISS) ] = 0,
1536 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1537 [ C(RESULT_ACCESS) ] = 0x01b7,
1538 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1539 [ C(RESULT_MISS) ] = 0x01b7,
1541 [ C(OP_PREFETCH) ] = {
1542 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1543 [ C(RESULT_ACCESS) ] = 0x01b7,
1544 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1545 [ C(RESULT_MISS) ] = 0x01b7,
1550 [ C(RESULT_ACCESS) ] = 0,
1551 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1554 [ C(RESULT_ACCESS) ] = 0,
1555 [ C(RESULT_MISS) ] = 0,
1557 [ C(OP_PREFETCH) ] = {
1558 [ C(RESULT_ACCESS) ] = 0,
1559 [ C(RESULT_MISS) ] = 0,
1564 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1565 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1568 [ C(RESULT_ACCESS) ] = -1,
1569 [ C(RESULT_MISS) ] = -1,
1571 [ C(OP_PREFETCH) ] = {
1572 [ C(RESULT_ACCESS) ] = -1,
1573 [ C(RESULT_MISS) ] = -1,
1578 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1579 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1582 [ C(RESULT_ACCESS) ] = -1,
1583 [ C(RESULT_MISS) ] = -1,
1585 [ C(OP_PREFETCH) ] = {
1586 [ C(RESULT_ACCESS) ] = -1,
1587 [ C(RESULT_MISS) ] = -1,
1592 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1593 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
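/* Goldmont is a 3-wide machine, hence the pipeline-width scale of 3. */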
1594 /* UOPS_NOT_DELIVERED.ANY */
1595 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1596 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1597 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1598 /* UOPS_RETIRED.ANY */
1599 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1600 /* UOPS_ISSUED.ANY */
1601 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1603 static struct attribute *glm_events_attrs[] = {
1604 EVENT_PTR(td_total_slots_glm),
1605 EVENT_PTR(td_total_slots_scale_glm),
1606 EVENT_PTR(td_fetch_bubbles_glm),
1607 EVENT_PTR(td_recovery_bubbles_glm),
1608 EVENT_PTR(td_slots_issued_glm),
1609 EVENT_PTR(td_slots_retired_glm),
1613 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1614 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1615 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1616 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1620 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1621 #define GLM_DEMAND_RFO BIT_ULL(1)
1622 #define GLM_ANY_RESPONSE BIT_ULL(16)
1623 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1624 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1625 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1626 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1627 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1628 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1629 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1631 static __initconst const u64 glm_hw_cache_event_ids
1632 [PERF_COUNT_HW_CACHE_MAX]
1633 [PERF_COUNT_HW_CACHE_OP_MAX]
1634 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1637 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1638 [C(RESULT_MISS)] = 0x0,
1641 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1642 [C(RESULT_MISS)] = 0x0,
1644 [C(OP_PREFETCH)] = {
1645 [C(RESULT_ACCESS)] = 0x0,
1646 [C(RESULT_MISS)] = 0x0,
1651 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1652 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1655 [C(RESULT_ACCESS)] = -1,
1656 [C(RESULT_MISS)] = -1,
1658 [C(OP_PREFETCH)] = {
1659 [C(RESULT_ACCESS)] = 0x0,
1660 [C(RESULT_MISS)] = 0x0,
1665 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1666 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1669 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1670 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1672 [C(OP_PREFETCH)] = {
1673 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1674 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1679 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1680 [C(RESULT_MISS)] = 0x0,
1683 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1684 [C(RESULT_MISS)] = 0x0,
1686 [C(OP_PREFETCH)] = {
1687 [C(RESULT_ACCESS)] = 0x0,
1688 [C(RESULT_MISS)] = 0x0,
1693 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1694 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1697 [C(RESULT_ACCESS)] = -1,
1698 [C(RESULT_MISS)] = -1,
1700 [C(OP_PREFETCH)] = {
1701 [C(RESULT_ACCESS)] = -1,
1702 [C(RESULT_MISS)] = -1,
1707 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1708 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1711 [C(RESULT_ACCESS)] = -1,
1712 [C(RESULT_MISS)] = -1,
1714 [C(OP_PREFETCH)] = {
1715 [C(RESULT_ACCESS)] = -1,
1716 [C(RESULT_MISS)] = -1,
1721 static __initconst const u64 glm_hw_cache_extra_regs
1722 [PERF_COUNT_HW_CACHE_MAX]
1723 [PERF_COUNT_HW_CACHE_OP_MAX]
1724 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1727 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1729 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1733 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1735 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1738 [C(OP_PREFETCH)] = {
1739 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1741 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1747 static __initconst const u64 glp_hw_cache_event_ids
1748 [PERF_COUNT_HW_CACHE_MAX]
1749 [PERF_COUNT_HW_CACHE_OP_MAX]
1750 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1753 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1754 [C(RESULT_MISS)] = 0x0,
1757 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1758 [C(RESULT_MISS)] = 0x0,
1760 [C(OP_PREFETCH)] = {
1761 [C(RESULT_ACCESS)] = 0x0,
1762 [C(RESULT_MISS)] = 0x0,
1767 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1768 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1771 [C(RESULT_ACCESS)] = -1,
1772 [C(RESULT_MISS)] = -1,
1774 [C(OP_PREFETCH)] = {
1775 [C(RESULT_ACCESS)] = 0x0,
1776 [C(RESULT_MISS)] = 0x0,
1781 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1782 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1785 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1786 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1788 [C(OP_PREFETCH)] = {
1789 [C(RESULT_ACCESS)] = 0x0,
1790 [C(RESULT_MISS)] = 0x0,
1795 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1796 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1799 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1800 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1802 [C(OP_PREFETCH)] = {
1803 [C(RESULT_ACCESS)] = 0x0,
1804 [C(RESULT_MISS)] = 0x0,
1809 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1810 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1813 [C(RESULT_ACCESS)] = -1,
1814 [C(RESULT_MISS)] = -1,
1816 [C(OP_PREFETCH)] = {
1817 [C(RESULT_ACCESS)] = -1,
1818 [C(RESULT_MISS)] = -1,
1823 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1824 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1827 [C(RESULT_ACCESS)] = -1,
1828 [C(RESULT_MISS)] = -1,
1830 [C(OP_PREFETCH)] = {
1831 [C(RESULT_ACCESS)] = -1,
1832 [C(RESULT_MISS)] = -1,
1837 static __initconst const u64 glp_hw_cache_extra_regs
1838 [PERF_COUNT_HW_CACHE_MAX]
1839 [PERF_COUNT_HW_CACHE_OP_MAX]
1840 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1843 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1845 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1849 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1851 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1854 [C(OP_PREFETCH)] = {
1855 [C(RESULT_ACCESS)] = 0x0,
1856 [C(RESULT_MISS)] = 0x0,
1861 #define TNT_LOCAL_DRAM BIT_ULL(26)
1862 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
1863 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
1864 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
1865 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
1866 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
1867 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
1869 static __initconst const u64 tnt_hw_cache_extra_regs
1870 [PERF_COUNT_HW_CACHE_MAX]
1871 [PERF_COUNT_HW_CACHE_OP_MAX]
1872 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1875 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
1877 [C(RESULT_MISS)] = TNT_DEMAND_READ|
1881 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
1883 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
1886 [C(OP_PREFETCH)] = {
1887 [C(RESULT_ACCESS)] = 0x0,
1888 [C(RESULT_MISS)] = 0x0,
1893 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
1894 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1895 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
1896 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
1900 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
1901 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
1902 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
1903 #define KNL_MCDRAM_FAR BIT_ULL(22)
1904 #define KNL_DDR_LOCAL BIT_ULL(23)
1905 #define KNL_DDR_FAR BIT_ULL(24)
1906 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1907 KNL_DDR_LOCAL | KNL_DDR_FAR)
1908 #define KNL_L2_READ SLM_DMND_READ
1909 #define KNL_L2_WRITE SLM_DMND_WRITE
1910 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
1911 #define KNL_L2_ACCESS SLM_LLC_ACCESS
1912 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1913 KNL_DRAM_ANY | SNB_SNP_ANY | \
1916 static __initconst const u64 knl_hw_cache_extra_regs
1917 [PERF_COUNT_HW_CACHE_MAX]
1918 [PERF_COUNT_HW_CACHE_OP_MAX]
1919 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1922 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1923 [C(RESULT_MISS)] = 0,
1926 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1927 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
1929 [C(OP_PREFETCH)] = {
1930 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1931 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
1937 * Used from PMIs where the LBRs are already disabled.
1939 * This function may be called consecutively; the PMU must remain in the
1940 * disabled state across such calls.
1942 * During consecutive calls, the same disable value is written to the related
1943 * registers, so the PMU state remains unchanged.
1945 * intel_bts events don't coexist with intel PMU's BTS events because of
1946 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1947 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1949 * Avoid PEBS_ENABLE MSR access in PMIs.
1950 * GLOBAL_CTRL has already been disabled, so none of the counters count
1951 * anymore; it doesn't matter whether PEBS is enabled or not.
1952 * Usually the PEBS status is not changed in PMIs, so it's unnecessary to
1953 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
1954 * However, some cases may change the PEBS status, e.g. PMI throttling.
1955 * PEBS_ENABLE should be updated where the status changes.
1957 static void __intel_pmu_disable_all(void)
1959 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1961 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1963 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1964 intel_pmu_disable_bts();
1967 static void intel_pmu_disable_all(void)
1969 __intel_pmu_disable_all();
1970 intel_pmu_pebs_disable_all();
1971 intel_pmu_lbr_disable_all();
1974 static void __intel_pmu_enable_all(int added, bool pmi)
1976 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1978 intel_pmu_lbr_enable_all(pmi);
1979 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1980 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1982 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1983 struct perf_event *event =
1984 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1986 if (WARN_ON_ONCE(!event))
1987 return;
1989 intel_pmu_enable_bts(event->hw.config);
1993 static void intel_pmu_enable_all(int added)
1995 intel_pmu_pebs_enable_all();
1996 __intel_pmu_enable_all(added, false);
2001 * Intel Errata AAK100 (model 26)
2002 * Intel Errata AAP53 (model 30)
2003 * Intel Errata BD53 (model 44)
2005 * The official story:
2006 * These chips need to be 'reset' when adding counters by programming the
2007 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2008 * in sequence on the same PMC or on different PMCs.
2010 * In practice it appears some of these events do in fact count, and
2011 * we need to program all 4 events.
2013 static void intel_pmu_nhm_workaround(void)
2015 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2016 static const unsigned long nhm_magic[4] = {
2022 struct perf_event *event;
2026 * The Errata requires below steps:
2027 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2028 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2029 * the corresponding PMCx;
2030 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2031 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2032 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
2036 * The real steps we choose are a little different from above.
2037 * A) To reduce MSR operations, we don't run step 1) as they
2038 * are already cleared before this function is called;
2039 * B) Call x86_perf_event_update to save PMCx before configuring
2040 * PERFEVTSELx with magic number;
2041 * C) For step 5), we only clear a PERFEVTSELx when it is
2042 * not currently in use.
2043 * D) Call x86_perf_event_set_period to restore PMCx;
2046 /* We always operate on 4 pairs of PERF counters */
2047 for (i = 0; i < 4; i++) {
2048 event = cpuc->events[i];
2050 x86_perf_event_update(event);
2053 for (i = 0; i < 4; i++) {
2054 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2055 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2058 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2059 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2061 for (i = 0; i < 4; i++) {
2062 event = cpuc->events[i];
2065 x86_perf_event_set_period(event);
2066 __x86_pmu_enable_event(&event->hw,
2067 ARCH_PERFMON_EVENTSEL_ENABLE);
2069 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2073 static void intel_pmu_nhm_enable_all(int added)
2076 intel_pmu_nhm_workaround();
2077 intel_pmu_enable_all(added);
2080 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2082 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2084 if (cpuc->tfa_shadow != val) {
2085 cpuc->tfa_shadow = val;
2086 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2090 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2093 * We're going to use PMC3, make sure TFA is set before we touch it.
2096 intel_set_tfa(cpuc, true);
2099 static void intel_tfa_pmu_enable_all(int added)
2101 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2104 * If we find PMC3 is no longer used when we enable the PMU, we can
2107 if (!test_bit(3, cpuc->active_mask))
2108 intel_set_tfa(cpuc, false);
2110 intel_pmu_enable_all(added);
2113 static void enable_counter_freeze(void)
2115 update_debugctlmsr(get_debugctlmsr() |
2116 DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2119 static void disable_counter_freeze(void)
2121 update_debugctlmsr(get_debugctlmsr() &
2122 ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
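/*
 * Sketch of the intent (an assumption based on the v4 handler further down):
 * with DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI set, the hardware freezes all
 * counters when a PMI is raised and unfreezes them when the status is acked,
 * so the handler does not have to toggle GLOBAL_CTRL by hand.
 */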
2125 static inline u64 intel_pmu_get_status(void)
2129 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2134 static inline void intel_pmu_ack_status(u64 ack)
2136 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2139 static inline bool event_is_checkpointed(struct perf_event *event)
2141 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2144 static inline void intel_set_masks(struct perf_event *event, int idx)
2146 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2148 if (event->attr.exclude_host)
2149 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2150 if (event->attr.exclude_guest)
2151 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2152 if (event_is_checkpointed(event))
2153 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2156 static inline void intel_clear_masks(struct perf_event *event, int idx)
2158 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2160 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2161 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2162 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2165 static void intel_pmu_disable_fixed(struct perf_event *event)
2167 struct hw_perf_event *hwc = &event->hw;
2168 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2171 mask = 0xfULL << (idx * 4);
2173 rdmsrl(hwc->config_base, ctrl_val);
2175 wrmsrl(hwc->config_base, ctrl_val);
2178 static void intel_pmu_disable_event(struct perf_event *event)
2180 struct hw_perf_event *hwc = &event->hw;
2183 if (idx < INTEL_PMC_IDX_FIXED) {
2184 intel_clear_masks(event, idx);
2185 x86_pmu_disable_event(event);
2186 } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
2187 intel_clear_masks(event, idx);
2188 intel_pmu_disable_fixed(event);
2189 } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
2190 intel_pmu_disable_bts();
2191 intel_pmu_drain_bts_buffer();
2192 } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
2193 intel_clear_masks(event, idx);
2196 * Needs to be called after x86_pmu_disable_event,
2197 * so we don't trigger the event without PEBS bit set.
2199 if (unlikely(event->attr.precise_ip))
2200 intel_pmu_pebs_disable(event);
2203 static void intel_pmu_del_event(struct perf_event *event)
2205 if (needs_branch_stack(event))
2206 intel_pmu_lbr_del(event);
2207 if (event->attr.precise_ip)
2208 intel_pmu_pebs_del(event);
2211 static void intel_pmu_read_event(struct perf_event *event)
2213 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2214 intel_pmu_auto_reload_read(event);
2216 x86_perf_event_update(event);
2219 static void intel_pmu_enable_fixed(struct perf_event *event)
2221 struct hw_perf_event *hwc = &event->hw;
2222 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2223 u64 ctrl_val, mask, bits = 0;
2226 * Enable IRQ generation (0x8), if not PEBS,
2227 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2230 if (!event->attr.precise_ip)
2232 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2234 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2238 * ANY bit is supported in v3 and up
2240 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2244 mask = 0xfULL << (idx * 4);
2246 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2247 bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2248 mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2251 rdmsrl(hwc->config_base, ctrl_val);
2254 wrmsrl(hwc->config_base, ctrl_val);
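/*
 * Layout sketch (illustrative, assuming the architectural fixed-counter
 * control layout): the control MSR packs one 4-bit field per fixed counter,
 * so the read-modify-write above amounts to
 *
 *   ctrl_val = (ctrl_val & ~mask) | bits;   // bits already shifted by idx*4
 *
 * where, within a field, bit 0 enables ring-0 counting, bit 1 ring-3
 * counting and bit 3 PMI generation on overflow (see the comment above).
 */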
2257 static void intel_pmu_enable_event(struct perf_event *event)
2259 struct hw_perf_event *hwc = &event->hw;
2262 if (unlikely(event->attr.precise_ip))
2263 intel_pmu_pebs_enable(event);
2265 if (idx < INTEL_PMC_IDX_FIXED) {
2266 intel_set_masks(event, idx);
2267 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2268 } else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
2269 intel_set_masks(event, idx);
2270 intel_pmu_enable_fixed(event);
2271 } else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
2272 if (!__this_cpu_read(cpu_hw_events.enabled))
2274 intel_pmu_enable_bts(hwc->config);
2275 } else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
2276 intel_set_masks(event, idx);
2279 static void intel_pmu_add_event(struct perf_event *event)
2281 if (event->attr.precise_ip)
2282 intel_pmu_pebs_add(event);
2283 if (needs_branch_stack(event))
2284 intel_pmu_lbr_add(event);
2288 * Save and restart an expired event. Called by NMI contexts,
2289 * so it has to be careful about preempting normal event ops:
2291 int intel_pmu_save_and_restart(struct perf_event *event)
2293 x86_perf_event_update(event);
2295 * For a checkpointed counter always reset back to 0. This
2296 * avoids a situation where the counter overflows, aborts the
2297 * transaction and is then set back to shortly before the
2298 * overflow, and overflows and aborts again.
2300 if (unlikely(event_is_checkpointed(event))) {
2301 /* No race with NMIs because the counter should not be armed */
2302 wrmsrl(event->hw.event_base, 0);
2303 local64_set(&event->hw.prev_count, 0);
2305 return x86_perf_event_set_period(event);
2308 static void intel_pmu_reset(void)
2310 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2311 unsigned long flags;
2314 if (!x86_pmu.num_counters)
2317 local_irq_save(flags);
2319 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2321 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2322 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2323 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2325 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2326 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2329 ds->bts_index = ds->bts_buffer_base;
2331 /* Ack all overflows and disable fixed counters */
2332 if (x86_pmu.version >= 2) {
2333 intel_pmu_ack_status(intel_pmu_get_status());
2334 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2337 /* Reset LBRs and LBR freezing */
2338 if (x86_pmu.lbr_nr) {
2339 update_debugctlmsr(get_debugctlmsr() &
2340 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2343 local_irq_restore(flags);
2346 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2348 struct perf_sample_data data;
2349 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2353 inc_irq_stat(apic_perf_irqs);
2356 * Ignore a range of extra bits in status that do not indicate
2357 * overflow by themselves.
2359 status &= ~(GLOBAL_STATUS_COND_CHG |
2360 GLOBAL_STATUS_ASIF |
2361 GLOBAL_STATUS_LBRS_FROZEN);
2365 * In case multiple PEBS events are sampled at the same time,
2366 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2367 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2368 * having their bits set in the status register. This is a sign
2369 * that there was at least one PEBS record pending at the time
2370 * of the PMU interrupt. PEBS counters must only be processed
2371 * via the drain_pebs() calls and not via the regular sample
2372 * processing loop that follows; otherwise
2373 * phony regular samples may be generated in the sampling buffer
2374 * not marked with the EXACT tag. Another possibility is to have
2375 * one PEBS event and at least one non-PEBS event which overflows
2376 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2377 * not be set, yet the overflow status bit for the PEBS counter will be set.
2380 * To avoid this problem, we systematically ignore the PEBS-enabled
2381 * counters from the GLOBAL_STATUS mask and we always process PEBS
2382 * events via drain_pebs().
2384 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2385 status &= ~cpuc->pebs_enabled;
2387 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2390 * PEBS overflow sets bit 62 in the global status register
2392 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2393 u64 pebs_enabled = cpuc->pebs_enabled;
2396 x86_pmu.drain_pebs(regs);
2397 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2400 * A PMI throttle may be triggered, which stops the PEBS event.
2401 * Although cpuc->pebs_enabled is updated accordingly, the
2402 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
2403 * forced to 0 in the PMI.
2404 * Update the MSR if pebs_enabled has changed.
2406 if (pebs_enabled != cpuc->pebs_enabled)
2407 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
2413 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2415 if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2416 perf_guest_cbs->handle_intel_pt_intr))
2417 perf_guest_cbs->handle_intel_pt_intr();
2419 intel_pt_interrupt();
2423 * Checkpointed counters can lead to 'spurious' PMIs because the
2424 * rollback caused by the PMI will have cleared the overflow status
2425 * bit. Therefore always force probe these counters.
2427 status |= cpuc->intel_cp_status;
2429 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2430 struct perf_event *event = cpuc->events[bit];
2434 if (!test_bit(bit, cpuc->active_mask))
2437 if (!intel_pmu_save_and_restart(event))
2440 perf_sample_data_init(&data, 0, event->hw.last_period);
2442 if (has_branch_stack(event))
2443 data.br_stack = &cpuc->lbr_stack;
2445 if (perf_event_overflow(event, &data, regs))
2446 x86_pmu_stop(event, 0);
2452 static bool disable_counter_freezing = true;
2453 static int __init intel_perf_counter_freezing_setup(char *s)
2457 if (kstrtobool(s, &res))
2460 disable_counter_freezing = !res;
2463 __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
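/*
 * Usage note (inferred from the setup handler above): counter freezing is
 * off by default; booting with "perf_v4_pmi=1" asks for the simplified
 * freeze-based v4 handler, while "perf_v4_pmi=0" keeps it disabled.
 */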
2466 * Simplified handler for Arch Perfmon v4:
2467 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2468 * This is done automatically on PMU ack.
2469 * - Ack the PMU only after the APIC.
2472 static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2474 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2478 int pmu_enabled = cpuc->enabled;
2481 /* PMU has been disabled because of counter freezing */
2483 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2485 intel_bts_disable_local();
2486 handled = intel_pmu_drain_bts_buffer();
2487 handled += intel_bts_interrupt();
2489 status = intel_pmu_get_status();
2493 intel_pmu_lbr_read();
2494 if (++loops > 100) {
2498 WARN(1, "perfevents: irq loop stuck!\n");
2499 perf_event_print_debug();
2507 handled += handle_pmi_common(regs, status);
2509 /* Ack the PMI in the APIC */
2510 apic_write(APIC_LVTPC, APIC_DM_NMI);
2513 * The counters start counting again as soon as the status is acked.
2514 * Make the ack as close as possible to the IRET. This avoids bogus
2515 * freezing on Skylake CPUs.
2518 intel_pmu_ack_status(status);
2521 * The CPU may issue two PMIs very close to each other.
2522 * When the PMI handler services the first one, the
2523 * GLOBAL_STATUS is already updated to reflect both.
2524 * When it IRETs, the second PMI is immediately
2525 * handled and sees a clear status. In the meantime,
2526 * there may be a third PMI, because the freeze bit
2527 * was cleared by the ack in the first PMI handler.
2528 * Double check whether there is more work to be done.
2530 status = intel_pmu_get_status();
2536 intel_bts_enable_local();
2537 cpuc->enabled = pmu_enabled;
2542 * This handler is triggered by the local APIC, so the APIC IRQ handling
2545 static int intel_pmu_handle_irq(struct pt_regs *regs)
2547 struct cpu_hw_events *cpuc;
2553 cpuc = this_cpu_ptr(&cpu_hw_events);
2556 * Save the PMU state.
2557 * It needs to be restored when leaving the handler.
2559 pmu_enabled = cpuc->enabled;
2561 * No known reason to not always do late ACK,
2562 * but just in case do it opt-in.
2564 if (!x86_pmu.late_ack)
2565 apic_write(APIC_LVTPC, APIC_DM_NMI);
2566 intel_bts_disable_local();
2568 __intel_pmu_disable_all();
2569 handled = intel_pmu_drain_bts_buffer();
2570 handled += intel_bts_interrupt();
2571 status = intel_pmu_get_status();
2577 intel_pmu_lbr_read();
2578 intel_pmu_ack_status(status);
2579 if (++loops > 100) {
2583 WARN(1, "perfevents: irq loop stuck!\n");
2584 perf_event_print_debug();
2591 handled += handle_pmi_common(regs, status);
2594 * Repeat if there is more work to be done:
2596 status = intel_pmu_get_status();
2601 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2602 cpuc->enabled = pmu_enabled;
2604 __intel_pmu_enable_all(0, true);
2605 intel_bts_enable_local();
2608 * Only unmask the NMI after the overflow counters
2609 * have been reset. This avoids spurious NMIs on
2612 if (x86_pmu.late_ack)
2613 apic_write(APIC_LVTPC, APIC_DM_NMI);
2617 static struct event_constraint *
2618 intel_bts_constraints(struct perf_event *event)
2620 if (unlikely(intel_pmu_has_bts(event)))
2621 return &bts_constraint;
2627 * Note: matches a fake event, like Fixed2.
2629 static struct event_constraint *
2630 intel_vlbr_constraints(struct perf_event *event)
2632 struct event_constraint *c = &vlbr_constraint;
2634 if (unlikely(constraint_match(c, event->hw.config)))
2640 static int intel_alt_er(int idx, u64 config)
2644 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2647 if (idx == EXTRA_REG_RSP_0)
2648 alt_idx = EXTRA_REG_RSP_1;
2650 if (idx == EXTRA_REG_RSP_1)
2651 alt_idx = EXTRA_REG_RSP_0;
2653 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2659 static void intel_fixup_er(struct perf_event *event, int idx)
2661 event->hw.extra_reg.idx = idx;
2663 if (idx == EXTRA_REG_RSP_0) {
2664 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2665 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2666 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2667 } else if (idx == EXTRA_REG_RSP_1) {
2668 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2669 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2670 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
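/*
 * Descriptive note: when the allocator has moved the event to the alternate
 * OFFCORE_RSP register, both the event select in hw.config and the extra
 * MSR address are rewritten above so they stay consistent with the
 * extra_regs[] entry that was actually claimed.
 */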
2675 * manage allocation of shared extra msr for certain events
2678 * per-cpu: to be shared between the various events on a single PMU
2679 * per-core: per-cpu + shared by HT threads
2681 static struct event_constraint *
2682 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2683 struct perf_event *event,
2684 struct hw_perf_event_extra *reg)
2686 struct event_constraint *c = &emptyconstraint;
2687 struct er_account *era;
2688 unsigned long flags;
2692 * reg->alloc can be set due to existing state, so for fake cpuc we
2693 * need to ignore this, otherwise we might fail to allocate proper fake
2694 * state for this extra reg constraint. Also see the comment below.
2696 if (reg->alloc && !cpuc->is_fake)
2697 return NULL; /* call x86_get_event_constraint() */
2700 era = &cpuc->shared_regs->regs[idx];
2702 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
2703 * passing a fake cpuc
2705 raw_spin_lock_irqsave(&era->lock, flags);
2707 if (!atomic_read(&era->ref) || era->config == reg->config) {
2710 * If it's a fake cpuc -- as per validate_{group,event}() we
2711 * shouldn't touch event state and we can avoid doing so
2712 * since both will only call get_event_constraints() once
2713 * on each event, this avoids the need for reg->alloc.
2715 * Not doing the ER fixup will only result in era->reg being
2716 * wrong, but since we won't actually try and program hardware
2717 * this isn't a problem either.
2719 if (!cpuc->is_fake) {
2720 if (idx != reg->idx)
2721 intel_fixup_er(event, idx);
2724 * x86_schedule_events() can call get_event_constraints()
2725 * multiple times on events in the case of incremental
2726 * scheduling. reg->alloc ensures we only do the ER
2732 /* lock in msr value */
2733 era->config = reg->config;
2734 era->reg = reg->reg;
2737 atomic_inc(&era->ref);
2740 * need to call x86_get_event_constraint()
2741 * to check if associated event has constraints
2745 idx = intel_alt_er(idx, reg->config);
2746 if (idx != reg->idx) {
2747 raw_spin_unlock_irqrestore(&era->lock, flags);
2751 raw_spin_unlock_irqrestore(&era->lock, flags);
2757 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2758 struct hw_perf_event_extra *reg)
2760 struct er_account *era;
2763 * Only put the constraint if the extra reg was actually allocated. This also
2764 * takes care of events which do not use an extra shared reg.
2766 * Also, if this is a fake cpuc we shouldn't touch any event state
2767 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2768 * either since it'll be thrown out.
2770 if (!reg->alloc || cpuc->is_fake)
2773 era = &cpuc->shared_regs->regs[reg->idx];
2775 /* one fewer user */
2776 atomic_dec(&era->ref);
2778 /* allocate again next time */
2782 static struct event_constraint *
2783 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2784 struct perf_event *event)
2786 struct event_constraint *c = NULL, *d;
2787 struct hw_perf_event_extra *xreg, *breg;
2789 xreg = &event->hw.extra_reg;
2790 if (xreg->idx != EXTRA_REG_NONE) {
2791 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2792 if (c == &emptyconstraint)
2795 breg = &event->hw.branch_reg;
2796 if (breg->idx != EXTRA_REG_NONE) {
2797 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2798 if (d == &emptyconstraint) {
2799 __intel_shared_reg_put_constraints(cpuc, xreg);
2806 struct event_constraint *
2807 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2808 struct perf_event *event)
2810 struct event_constraint *c;
2812 if (x86_pmu.event_constraints) {
2813 for_each_event_constraint(c, x86_pmu.event_constraints) {
2814 if (constraint_match(c, event->hw.config)) {
2815 event->hw.flags |= c->flags;
2821 return &unconstrained;
2824 static struct event_constraint *
2825 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2826 struct perf_event *event)
2828 struct event_constraint *c;
2830 c = intel_vlbr_constraints(event);
2834 c = intel_bts_constraints(event);
2838 c = intel_shared_regs_constraints(cpuc, event);
2842 c = intel_pebs_constraints(event);
2846 return x86_get_event_constraints(cpuc, idx, event);
2850 intel_start_scheduling(struct cpu_hw_events *cpuc)
2852 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2853 struct intel_excl_states *xl;
2854 int tid = cpuc->excl_thread_id;
2857 * nothing needed if in group validation mode
2859 if (cpuc->is_fake || !is_ht_workaround_enabled())
2863 * no exclusion needed
2865 if (WARN_ON_ONCE(!excl_cntrs))
2868 xl = &excl_cntrs->states[tid];
2870 xl->sched_started = true;
2872 * lock shared state until we are done scheduling
2873 * in stop_event_scheduling()
2874 * makes scheduling appear as a transaction
2876 raw_spin_lock(&excl_cntrs->lock);
2879 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2881 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2882 struct event_constraint *c = cpuc->event_constraint[idx];
2883 struct intel_excl_states *xl;
2884 int tid = cpuc->excl_thread_id;
2886 if (cpuc->is_fake || !is_ht_workaround_enabled())
2889 if (WARN_ON_ONCE(!excl_cntrs))
2892 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2895 xl = &excl_cntrs->states[tid];
2897 lockdep_assert_held(&excl_cntrs->lock);
2899 if (c->flags & PERF_X86_EVENT_EXCL)
2900 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2902 xl->state[cntr] = INTEL_EXCL_SHARED;
2906 intel_stop_scheduling(struct cpu_hw_events *cpuc)
2908 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2909 struct intel_excl_states *xl;
2910 int tid = cpuc->excl_thread_id;
2913 * nothing needed if in group validation mode
2915 if (cpuc->is_fake || !is_ht_workaround_enabled())
2918 * no exclusion needed
2920 if (WARN_ON_ONCE(!excl_cntrs))
2923 xl = &excl_cntrs->states[tid];
2925 xl->sched_started = false;
2927 * release shared state lock (acquired in intel_start_scheduling())
2929 raw_spin_unlock(&excl_cntrs->lock);
2932 static struct event_constraint *
2933 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2935 WARN_ON_ONCE(!cpuc->constraint_list);
2937 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2938 struct event_constraint *cx;
2941 * grab pre-allocated constraint entry
2943 cx = &cpuc->constraint_list[idx];
2946 * initialize dynamic constraint
2947 * with static constraint
2952 * mark constraint as dynamic
2954 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2961 static struct event_constraint *
2962 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2963 int idx, struct event_constraint *c)
2965 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2966 struct intel_excl_states *xlo;
2967 int tid = cpuc->excl_thread_id;
2971 * validating a group does not require
2972 * enforcing cross-thread exclusion
2974 if (cpuc->is_fake || !is_ht_workaround_enabled())
2978 * no exclusion needed
2980 if (WARN_ON_ONCE(!excl_cntrs))
2984 * because we modify the constraint, we need
2985 * to make a copy. Static constraints come
2986 * from static const tables.
2988 * only needed when constraint has not yet
2989 * been cloned (marked dynamic)
2991 c = dyn_constraint(cpuc, c, idx);
2994 * From here on, the constraint is dynamic.
2995 * Either it was just allocated above, or it
2996 * was allocated during an earlier invocation
3001 * state of sibling HT
3003 xlo = &excl_cntrs->states[tid ^ 1];
3006 * event requires exclusive counter access
3009 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3010 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3011 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3012 if (!cpuc->n_excl++)
3013 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3017 * Modify static constraint with current dynamic
3020 * EXCLUSIVE: sibling counter measuring exclusive event
3021 * SHARED : sibling counter measuring non-exclusive event
3022 * UNUSED : sibling counter unused
3025 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3027 * exclusive event in sibling counter
3028 * our corresponding counter cannot be used
3029 * regardless of our event
3031 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3032 __clear_bit(i, c->idxmsk);
3037 * if measuring an exclusive event, sibling
3038 * measuring non-exclusive, then counter cannot
3041 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3042 __clear_bit(i, c->idxmsk);
3049 * if we return an empty mask, then switch
3050 * back to static empty constraint to avoid
3051 * the cost of freeing later on
3054 c = &emptyconstraint;
3061 static struct event_constraint *
3062 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3063 struct perf_event *event)
3065 struct event_constraint *c1, *c2;
3067 c1 = cpuc->event_constraint[idx];
3071 * - static constraint: no change across incremental scheduling calls
3072 * - dynamic constraint: handled by intel_get_excl_constraints()
3074 c2 = __intel_get_event_constraints(cpuc, idx, event);
3076 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3077 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3078 c1->weight = c2->weight;
3082 if (cpuc->excl_cntrs)
3083 return intel_get_excl_constraints(cpuc, event, idx, c2);
3088 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3089 struct perf_event *event)
3091 struct hw_perf_event *hwc = &event->hw;
3092 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3093 int tid = cpuc->excl_thread_id;
3094 struct intel_excl_states *xl;
3097 * nothing needed if in group validation mode
3102 if (WARN_ON_ONCE(!excl_cntrs))
3105 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3106 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3107 if (!--cpuc->n_excl)
3108 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3112 * If event was actually assigned, then mark the counter state as
3115 if (hwc->idx >= 0) {
3116 xl = &excl_cntrs->states[tid];
3119 * put_constraint may be called from x86_schedule_events()
3120 * which already has the lock held, so make the locking here conditional.
3123 if (!xl->sched_started)
3124 raw_spin_lock(&excl_cntrs->lock);
3126 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3128 if (!xl->sched_started)
3129 raw_spin_unlock(&excl_cntrs->lock);
3134 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3135 struct perf_event *event)
3137 struct hw_perf_event_extra *reg;
3139 reg = &event->hw.extra_reg;
3140 if (reg->idx != EXTRA_REG_NONE)
3141 __intel_shared_reg_put_constraints(cpuc, reg);
3143 reg = &event->hw.branch_reg;
3144 if (reg->idx != EXTRA_REG_NONE)
3145 __intel_shared_reg_put_constraints(cpuc, reg);
3148 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3149 struct perf_event *event)
3151 intel_put_shared_regs_event_constraints(cpuc, event);
3154 * if the PMU has exclusive counter restrictions, then
3155 * all events are subject to them and must call the
3156 * put_excl_constraints() routine
3158 if (cpuc->excl_cntrs)
3159 intel_put_excl_constraints(cpuc, event);
3162 static void intel_pebs_aliases_core2(struct perf_event *event)
3164 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3166 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3167 * (0x003c) so that we can use it with PEBS.
3169 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3170 * PEBS capable. However we can use INST_RETIRED.ANY_P
3171 * (0x00c0), which is a PEBS capable event, to get the same
3174 * INST_RETIRED.ANY_P counts the number of cycles that retire
3175 * CNTMASK instructions. By setting CNTMASK to a value (16)
3176 * larger than the maximum number of instructions that can be
3177 * retired per cycle (4) and then inverting the condition, we
3178 * count all cycles that retire 16 or fewer instructions, which
3181 * Thereby we gain a PEBS capable cycle counter.
3183 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3185 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3186 event->hw.config = alt_config;
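/*
 * Worked example (illustrative, assuming the format bits exported below:
 * event=config:0-7, inv=config:23, cmask=config:24-31): the alias
 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) corresponds to the raw value
 *
 *   0xc0 | (1 << 23) | (16 << 24) == 0x108000c0
 *
 * i.e. "count cycles in which fewer than 16 instructions retire", which on
 * this hardware (at most 4 retired per cycle) is every cycle.
 */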
3190 static void intel_pebs_aliases_snb(struct perf_event *event)
3192 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3194 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3195 * (0x003c) so that we can use it with PEBS.
3197 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3198 * PEBS capable. However we can use UOPS_RETIRED.ALL
3199 * (0x01c2), which is a PEBS capable event, to get the same
3202 * UOPS_RETIRED.ALL counts the number of cycles that retire
3203 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3204 * larger than the maximum number of micro-ops that can be
3205 * retired per cycle (4) and then inverting the condition, we
3206 * count all cycles that retire 16 or fewer micro-ops, which
3209 * Thereby we gain a PEBS capable cycle counter.
3211 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3213 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3214 event->hw.config = alt_config;
3218 static void intel_pebs_aliases_precdist(struct perf_event *event)
3220 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3222 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3223 * (0x003c) so that we can use it with PEBS.
3225 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3226 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3227 * (0x01c0), which is a PEBS capable event, to get the same
3230 * The PREC_DIST event has special support to minimize sample
3231 * shadowing effects. One drawback is that it can be
3232 * only programmed on counter 1, but that seems like an
3233 * acceptable trade off.
3235 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3237 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3238 event->hw.config = alt_config;
3242 static void intel_pebs_aliases_ivb(struct perf_event *event)
3244 if (event->attr.precise_ip < 3)
3245 return intel_pebs_aliases_snb(event);
3246 return intel_pebs_aliases_precdist(event);
3249 static void intel_pebs_aliases_skl(struct perf_event *event)
3251 if (event->attr.precise_ip < 3)
3252 return intel_pebs_aliases_core2(event);
3253 return intel_pebs_aliases_precdist(event);
3256 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3258 unsigned long flags = x86_pmu.large_pebs_flags;
3260 if (event->attr.use_clockid)
3261 flags &= ~PERF_SAMPLE_TIME;
3262 if (!event->attr.exclude_kernel)
3263 flags &= ~PERF_SAMPLE_REGS_USER;
3264 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3265 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3269 static int intel_pmu_bts_config(struct perf_event *event)
3271 struct perf_event_attr *attr = &event->attr;
3273 if (unlikely(intel_pmu_has_bts(event))) {
3274 /* BTS is not supported by this architecture. */
3275 if (!x86_pmu.bts_active)
3278 /* BTS is currently only allowed for user-mode. */
3279 if (!attr->exclude_kernel)
3282 /* BTS is not allowed for precise events. */
3283 if (attr->precise_ip)
3286 /* disallow bts if conflicting events are present */
3287 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3290 event->destroy = hw_perf_lbr_event_destroy;
3296 static int core_pmu_hw_config(struct perf_event *event)
3298 int ret = x86_pmu_hw_config(event);
3303 return intel_pmu_bts_config(event);
3306 static int intel_pmu_hw_config(struct perf_event *event)
3308 int ret = x86_pmu_hw_config(event);
3313 ret = intel_pmu_bts_config(event);
3317 if (event->attr.precise_ip) {
3318 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3319 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3320 if (!(event->attr.sample_type &
3321 ~intel_pmu_large_pebs_flags(event)))
3322 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3324 if (x86_pmu.pebs_aliases)
3325 x86_pmu.pebs_aliases(event);
3327 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3328 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3331 if (needs_branch_stack(event)) {
3332 ret = intel_pmu_setup_lbr_filter(event);
3337 * BTS is set up earlier in this path, so don't account twice
3339 if (!unlikely(intel_pmu_has_bts(event))) {
3340 /* disallow lbr if conflicting events are present */
3341 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3344 event->destroy = hw_perf_lbr_event_destroy;
3348 if (event->attr.aux_output) {
3349 if (!event->attr.precise_ip)
3352 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3355 if (event->attr.type != PERF_TYPE_RAW)
3358 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3361 if (x86_pmu.version < 3)
3364 ret = perf_allow_cpu(&event->attr);
3368 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3373 #ifdef CONFIG_RETPOLINE
3374 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr);
3375 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr);
3378 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3380 #ifdef CONFIG_RETPOLINE
3381 if (x86_pmu.guest_get_msrs == intel_guest_get_msrs)
3382 return intel_guest_get_msrs(nr);
3383 else if (x86_pmu.guest_get_msrs == core_guest_get_msrs)
3384 return core_guest_get_msrs(nr);
3386 if (x86_pmu.guest_get_msrs)
3387 return x86_pmu.guest_get_msrs(nr);
3391 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3393 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3395 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3396 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3398 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3399 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3400 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3401 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3402 arr[0].guest &= ~cpuc->pebs_enabled;
3404 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3407 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3409 * If a PMU counter has PEBS enabled, it is not enough to
3410 * disable the counter on guest entry, since a PEBS memory
3411 * write can overshoot the guest entry and corrupt guest
3412 * memory. Disabling PEBS solves the problem.
3414 * Don't do this if the CPU already enforces it.
3416 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3417 arr[1].host = cpuc->pebs_enabled;
3425 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3427 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3428 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3431 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3432 struct perf_event *event = cpuc->events[idx];
3434 arr[idx].msr = x86_pmu_config_addr(idx);
3435 arr[idx].host = arr[idx].guest = 0;
3437 if (!test_bit(idx, cpuc->active_mask))
3440 arr[idx].host = arr[idx].guest =
3441 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3443 if (event->attr.exclude_host)
3444 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3445 else if (event->attr.exclude_guest)
3446 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3449 *nr = x86_pmu.num_counters;
3453 static void core_pmu_enable_event(struct perf_event *event)
3455 if (!event->attr.exclude_host)
3456 x86_pmu_enable_event(event);
3459 static void core_pmu_enable_all(int added)
3461 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3464 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3465 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3467 if (!test_bit(idx, cpuc->active_mask) ||
3468 cpuc->events[idx]->attr.exclude_host)
3471 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3475 static int hsw_hw_config(struct perf_event *event)
3477 int ret = intel_pmu_hw_config(event);
3481 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3483 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3486 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3487 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid this combination.
3490 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3491 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3492 event->attr.precise_ip > 0))
3495 if (event_is_checkpointed(event)) {
3497 * Sampling of checkpointed events can cause situations where
3498 * the CPU constantly aborts because of an overflow, which is
3499 * then checkpointed back and ignored. Forbid checkpointing for sampling,
3502 * but still allow a long sampling period, so that perf stat can measure it.
3505 if (event->attr.sample_period > 0 &&
3506 event->attr.sample_period < 0x7fffffff)
3512 static struct event_constraint counter0_constraint =
3513 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3515 static struct event_constraint counter2_constraint =
3516 EVENT_CONSTRAINT(0, 0x4, 0);
3518 static struct event_constraint fixed0_constraint =
3519 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3521 static struct event_constraint fixed0_counter0_constraint =
3522 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
3524 static struct event_constraint *
3525 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3526 struct perf_event *event)
3528 struct event_constraint *c;
3530 c = intel_get_event_constraints(cpuc, idx, event);
3532 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3533 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3534 if (c->idxmsk64 & (1U << 2))
3535 return &counter2_constraint;
3536 return &emptyconstraint;
3542 static struct event_constraint *
3543 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3544 struct perf_event *event)
3547 * Fixed counter 0 has less skid.
3548 * Force instruction:ppp in Fixed counter 0
3550 if ((event->attr.precise_ip == 3) &&
3551 constraint_match(&fixed0_constraint, event->hw.config))
3552 return &fixed0_constraint;
3554 return hsw_get_event_constraints(cpuc, idx, event);
3557 static struct event_constraint *
3558 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3559 struct perf_event *event)
3561 struct event_constraint *c;
3563 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3564 if (event->attr.precise_ip == 3)
3565 return &counter0_constraint;
3567 c = intel_get_event_constraints(cpuc, idx, event);
3572 static struct event_constraint *
3573 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3574 struct perf_event *event)
3576 struct event_constraint *c;
3579 * :ppp means to do reduced skid PEBS,
3580 * which is available on PMC0 and fixed counter 0.
3582 if (event->attr.precise_ip == 3) {
3583 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
3584 if (constraint_match(&fixed0_constraint, event->hw.config))
3585 return &fixed0_counter0_constraint;
3587 return &counter0_constraint;
3590 c = intel_get_event_constraints(cpuc, idx, event);
3595 static bool allow_tsx_force_abort = true;
3597 static struct event_constraint *
3598 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3599 struct perf_event *event)
3601 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3604 * Without TFA we must not use PMC3.
3606 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
3607 c = dyn_constraint(cpuc, c, idx);
3608 c->idxmsk64 &= ~(1ULL << 3);
3618 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3619 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3620 * the two to enforce a minimum period of 128 (the smallest value that has bits
3621 * 0-5 cleared and >= 100).
3623 * Because of how the code in x86_perf_event_set_period() works, the truncation
3624 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3625 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3627 * Therefore the effective (average) period matches the requested period,
3628 * despite coarser hardware granularity.
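/*
 * Worked example (illustrative): a requested period of 1000003 has its low
 * six bits cleared, giving 1000000 (1000003 & ~0x3f). The 3 "missing"
 * counts are carried in period_left and folded into a later period, so the
 * long-run average still matches the requested value.
 */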
3630 static u64 bdw_limit_period(struct perf_event *event, u64 left)
3632 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3633 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3641 static u64 nhm_limit_period(struct perf_event *event, u64 left)
3643 return max(left, 32ULL);
3646 PMU_FORMAT_ATTR(event, "config:0-7" );
3647 PMU_FORMAT_ATTR(umask, "config:8-15" );
3648 PMU_FORMAT_ATTR(edge, "config:18" );
3649 PMU_FORMAT_ATTR(pc, "config:19" );
3650 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
3651 PMU_FORMAT_ATTR(inv, "config:23" );
3652 PMU_FORMAT_ATTR(cmask, "config:24-31" );
3653 PMU_FORMAT_ATTR(in_tx, "config:32");
3654 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
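/*
 * Usage sketch (hypothetical command line): these format attributes are what
 * let raw events be specified by field, e.g.
 *
 *   perf stat -e cpu/event=0x3c,in_tx=1/ -- <workload>
 *
 * which perf translates into config bits 0-7 = 0x3c and config bit 32 = 1
 * according to the definitions above.
 */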
3656 static struct attribute *intel_arch_formats_attr[] = {
3657 &format_attr_event.attr,
3658 &format_attr_umask.attr,
3659 &format_attr_edge.attr,
3660 &format_attr_pc.attr,
3661 &format_attr_inv.attr,
3662 &format_attr_cmask.attr,
3666 ssize_t intel_event_sysfs_show(char *page, u64 config)
3668 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3670 return x86_event_sysfs_show(page, config, event);
3673 static struct intel_shared_regs *allocate_shared_regs(int cpu)
3675 struct intel_shared_regs *regs;
3678 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3679 GFP_KERNEL, cpu_to_node(cpu));
3682 * initialize the locks to keep lockdep happy
3684 for (i = 0; i < EXTRA_REG_MAX; i++)
3685 raw_spin_lock_init(&regs->regs[i].lock);
3692 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3694 struct intel_excl_cntrs *c;
3696 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3697 GFP_KERNEL, cpu_to_node(cpu));
3699 raw_spin_lock_init(&c->lock);
3706 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3708 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
3710 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3711 cpuc->shared_regs = allocate_shared_regs(cpu);
3712 if (!cpuc->shared_regs)
3716 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3717 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3719 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3720 if (!cpuc->constraint_list)
3721 goto err_shared_regs;
3724 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3725 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3726 if (!cpuc->excl_cntrs)
3727 goto err_constraint_list;
3729 cpuc->excl_thread_id = 0;
3734 err_constraint_list:
3735 kfree(cpuc->constraint_list);
3736 cpuc->constraint_list = NULL;
3739 kfree(cpuc->shared_regs);
3740 cpuc->shared_regs = NULL;
3746 static int intel_pmu_cpu_prepare(int cpu)
3748 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3751 static void flip_smm_bit(void *data)
3753 unsigned long set = *(unsigned long *)data;
3756 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3757 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3759 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3760 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3764 static void intel_pmu_cpu_starting(int cpu)
3766 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3767 int core_id = topology_core_id(cpu);
3770 init_debug_store_on_cpu(cpu);
3772 * Deal with CPUs that don't clear their LBRs on power-up.
3774 intel_pmu_lbr_reset();
3776 cpuc->lbr_sel = NULL;
3778 if (x86_pmu.flags & PMU_FL_TFA) {
3779 WARN_ON_ONCE(cpuc->tfa_shadow);
3780 cpuc->tfa_shadow = ~0ULL;
3781 intel_set_tfa(cpuc, false);
3784 if (x86_pmu.version > 1)
3785 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3787 if (x86_pmu.counter_freezing)
3788 enable_counter_freeze();
3790 if (!cpuc->shared_regs)
3793 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3794 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3795 struct intel_shared_regs *pc;
3797 pc = per_cpu(cpu_hw_events, i).shared_regs;
3798 if (pc && pc->core_id == core_id) {
3799 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3800 cpuc->shared_regs = pc;
3804 cpuc->shared_regs->core_id = core_id;
3805 cpuc->shared_regs->refcnt++;
3808 if (x86_pmu.lbr_sel_map)
3809 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3811 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3812 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3813 struct cpu_hw_events *sibling;
3814 struct intel_excl_cntrs *c;
3816 sibling = &per_cpu(cpu_hw_events, i);
3817 c = sibling->excl_cntrs;
3818 if (c && c->core_id == core_id) {
3819 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3820 cpuc->excl_cntrs = c;
3821 if (!sibling->excl_thread_id)
3822 cpuc->excl_thread_id = 1;
3826 cpuc->excl_cntrs->core_id = core_id;
3827 cpuc->excl_cntrs->refcnt++;
3831 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3833 struct intel_excl_cntrs *c;
3835 c = cpuc->excl_cntrs;
3837 if (c->core_id == -1 || --c->refcnt == 0)
3839 cpuc->excl_cntrs = NULL;
3842 kfree(cpuc->constraint_list);
3843 cpuc->constraint_list = NULL;
3846 static void intel_pmu_cpu_dying(int cpu)
3848 fini_debug_store_on_cpu(cpu);
3850 if (x86_pmu.counter_freezing)
3851 disable_counter_freeze();
3854 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3856 struct intel_shared_regs *pc;
3858 pc = cpuc->shared_regs;
3860 if (pc->core_id == -1 || --pc->refcnt == 0)
3862 cpuc->shared_regs = NULL;
3865 free_excl_cntrs(cpuc);
3868 static void intel_pmu_cpu_dead(int cpu)
3870 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3873 static void intel_pmu_sched_task(struct perf_event_context *ctx,
3876 intel_pmu_pebs_sched_task(ctx, sched_in);
3877 intel_pmu_lbr_sched_task(ctx, sched_in);
3880 static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
3881 struct perf_event_context *next)
3883 intel_pmu_lbr_swap_task_ctx(prev, next);
3886 static int intel_pmu_check_period(struct perf_event *event, u64 value)
3888 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3891 static int intel_pmu_aux_output_match(struct perf_event *event)
3893 if (!x86_pmu.intel_cap.pebs_output_pt_available)
3896 return is_intel_pt_event(event);
3899 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3901 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3903 PMU_FORMAT_ATTR(frontend, "config1:0-23");
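/*
 * Descriptive note: ldlat is the PEBS load-latency threshold in cycles; the
 * mem-loads alias further down uses ldlat=3, i.e. only loads taking at least
 * that many cycles are sampled (assumption: standard load-latency semantics).
 */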
3905 static struct attribute *intel_arch3_formats_attr[] = {
3906 &format_attr_event.attr,
3907 &format_attr_umask.attr,
3908 &format_attr_edge.attr,
3909 &format_attr_pc.attr,
3910 &format_attr_any.attr,
3911 &format_attr_inv.attr,
3912 &format_attr_cmask.attr,
3916 static struct attribute *hsw_format_attr[] = {
3917 &format_attr_in_tx.attr,
3918 &format_attr_in_tx_cp.attr,
3919 &format_attr_offcore_rsp.attr,
3920 &format_attr_ldlat.attr,
3924 static struct attribute *nhm_format_attr[] = {
3925 &format_attr_offcore_rsp.attr,
3926 &format_attr_ldlat.attr,
3930 static struct attribute *slm_format_attr[] = {
3931 &format_attr_offcore_rsp.attr,
3935 static struct attribute *skl_format_attr[] = {
3936 &format_attr_frontend.attr,
3940 static __initconst const struct x86_pmu core_pmu = {
3942 .handle_irq = x86_pmu_handle_irq,
3943 .disable_all = x86_pmu_disable_all,
3944 .enable_all = core_pmu_enable_all,
3945 .enable = core_pmu_enable_event,
3946 .disable = x86_pmu_disable_event,
3947 .hw_config = core_pmu_hw_config,
3948 .schedule_events = x86_schedule_events,
3949 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3950 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3951 .event_map = intel_pmu_event_map,
3952 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3954 .large_pebs_flags = LARGE_PEBS_FLAGS,
3957 * Intel PMCs cannot be accessed sanely above 32-bit width,
3958 * so we install an artificial 1<<31 period regardless of
3959 * the generic event period:
3961 .max_period = (1ULL<<31) - 1,
3962 .get_event_constraints = intel_get_event_constraints,
3963 .put_event_constraints = intel_put_event_constraints,
3964 .event_constraints = intel_core_event_constraints,
3965 .guest_get_msrs = core_guest_get_msrs,
3966 .format_attrs = intel_arch_formats_attr,
3967 .events_sysfs_show = intel_event_sysfs_show,
3970 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
3971 * together with PMU version 1 and thus be using core_pmu with
3972 * shared_regs. We need the following callbacks here to allocate
3975 .cpu_prepare = intel_pmu_cpu_prepare,
3976 .cpu_starting = intel_pmu_cpu_starting,
3977 .cpu_dying = intel_pmu_cpu_dying,
3978 .cpu_dead = intel_pmu_cpu_dead,
3980 .check_period = intel_pmu_check_period,
3982 .lbr_reset = intel_pmu_lbr_reset_64,
3983 .lbr_read = intel_pmu_lbr_read_64,
3984 .lbr_save = intel_pmu_lbr_save,
3985 .lbr_restore = intel_pmu_lbr_restore,
3988 static __initconst const struct x86_pmu intel_pmu = {
3990 .handle_irq = intel_pmu_handle_irq,
3991 .disable_all = intel_pmu_disable_all,
3992 .enable_all = intel_pmu_enable_all,
3993 .enable = intel_pmu_enable_event,
3994 .disable = intel_pmu_disable_event,
3995 .add = intel_pmu_add_event,
3996 .del = intel_pmu_del_event,
3997 .read = intel_pmu_read_event,
3998 .hw_config = intel_pmu_hw_config,
3999 .schedule_events = x86_schedule_events,
4000 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4001 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4002 .event_map = intel_pmu_event_map,
4003 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4005 .large_pebs_flags = LARGE_PEBS_FLAGS,
4007 * Intel PMCs cannot be accessed sanely above 32 bit width,
4008 * so we install an artificial 1<<31 period regardless of
4009 * the generic event period:
4011 .max_period = (1ULL << 31) - 1,
4012 .get_event_constraints = intel_get_event_constraints,
4013 .put_event_constraints = intel_put_event_constraints,
4014 .pebs_aliases = intel_pebs_aliases_core2,
4016 .format_attrs = intel_arch3_formats_attr,
4017 .events_sysfs_show = intel_event_sysfs_show,
4019 .cpu_prepare = intel_pmu_cpu_prepare,
4020 .cpu_starting = intel_pmu_cpu_starting,
4021 .cpu_dying = intel_pmu_cpu_dying,
4022 .cpu_dead = intel_pmu_cpu_dead,
4024 .guest_get_msrs = intel_guest_get_msrs,
4025 .sched_task = intel_pmu_sched_task,
4026 .swap_task_ctx = intel_pmu_swap_task_ctx,
4028 .check_period = intel_pmu_check_period,
4030 .aux_output_match = intel_pmu_aux_output_match,
4032 .lbr_reset = intel_pmu_lbr_reset_64,
4033 .lbr_read = intel_pmu_lbr_read_64,
4034 .lbr_save = intel_pmu_lbr_save,
4035 .lbr_restore = intel_pmu_lbr_restore,
4038 static __init void intel_clovertown_quirk(void)
4041 * PEBS is unreliable due to:
4043 * AJ67 - PEBS may experience CPL leaks
4044 * AJ68 - PEBS PMI may be delayed by one event
4045 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
4046 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
4048 * AJ67 could be worked around by restricting the OS/USR flags.
4049 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
4051 * AJ106 could possibly be worked around by not allowing LBR
4052 * usage from PEBS, including the fixup.
4053 * AJ68 could possibly be worked around by always programming
4054 * a pebs_event_reset[0] value and coping with the lost events.
4056 * But taken together it might just make sense to not enable PEBS on
4059 pr_warn("PEBS disabled due to CPU errata\n");
4061 x86_pmu.pebs_constraints = NULL;
4064 static const struct x86_cpu_desc isolation_ucodes[] = {
4065 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
4066 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
4067 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
4068 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
4069 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
4070 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
4071 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
4072 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
4073 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
4074 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
4075 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
4076 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
4077 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
4078 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
4079 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
4080 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
4081 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
4082 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
4083 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
4084 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
4085 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
4086 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
4087 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
4088 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
4089 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
4093 static void intel_check_pebs_isolation(void)
4095 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4098 static __init void intel_pebs_isolation_quirk(void)
4100 WARN_ON_ONCE(x86_pmu.check_microcode);
4101 x86_pmu.check_microcode = intel_check_pebs_isolation;
4102 intel_check_pebs_isolation();
4105 static const struct x86_cpu_desc pebs_ucodes[] = {
4106 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
4107 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
4108 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
4112 static bool intel_snb_pebs_broken(void)
4114 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4117 static void intel_snb_check_microcode(void)
4119 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4123 * Serialized by the microcode lock.
4125 if (x86_pmu.pebs_broken) {
4126 pr_info("PEBS enabled due to microcode update\n");
4127 x86_pmu.pebs_broken = 0;
4129 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4130 x86_pmu.pebs_broken = 1;
4134 static bool is_lbr_from(unsigned long msr)
4136 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4138 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4142 * Under certain circumstances, accessing certain MSRs may cause a #GP.
4143 * This function tests whether the input MSR can be safely accessed.
4145 static bool check_msr(unsigned long msr, u64 mask)
4147 u64 val_old, val_new, val_tmp;
4150 * Disable the check for real HW, so we don't
4151 * mess with potentially enabled registers:
4153 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4157 * Read the current value, change it and read it back to see if it
4158 * matches, this is needed to detect certain hardware emulators
4159 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
4161 if (rdmsrl_safe(msr, &val_old))
4165 * Only change the bits which can be updated by wrmsrl.
4167 val_tmp = val_old ^ mask;
4169 if (is_lbr_from(msr))
4170 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4172 if (wrmsrl_safe(msr, val_tmp) ||
4173 rdmsrl_safe(msr, &val_new))
4177 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4178 * should equal rdmsrl()'s even with the quirk.
4180 if (val_new != val_tmp)
4183 if (is_lbr_from(msr))
4184 val_old = lbr_from_signext_quirk_wr(val_old);
4186 /* At this point we know the MSR can be safely accessed.
4187 * Restore the old value and return.
4189 wrmsrl(msr, val_old);
4194 static __init void intel_sandybridge_quirk(void)
4196 x86_pmu.check_microcode = intel_snb_check_microcode;
4198 intel_snb_check_microcode();
4202 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4203 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4204 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4205 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4206 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4207 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4208 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4209 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
4212 static __init void intel_arch_events_quirk(void)
4216 /* disable events that are reported as not present by CPUID */
4217 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4218 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4219 pr_warn("CPUID marked event: \'%s\' unavailable\n",
4220 intel_arch_events_map[bit].name);
4224 static __init void intel_nehalem_quirk(void)
4226 union cpuid10_ebx ebx;
4228 ebx.full = x86_pmu.events_maskl;
4229 if (ebx.split.no_branch_misses_retired) {
4231 * Erratum AAJ80 detected, we work around it by using
4232 * the BR_MISP_EXEC.ANY event. This will over-count
4233 * branch-misses, but it's still much better than the
4234 * architectural event which is often completely bogus:
4236 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4237 ebx.split.no_branch_misses_retired = 0;
4238 x86_pmu.events_maskl = ebx.full;
4239 pr_info("CPU erratum AAJ80 worked around\n");
4243 static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4244 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
4245 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
4246 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
4247 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_D, 1, 0x00000028),
4248 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
4249 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
4253 static bool intel_counter_freezing_broken(void)
4255 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4258 static __init void intel_counter_freezing_quirk(void)
4260 /* Check if it's already disabled */
4261 if (disable_counter_freezing)
4265 * If the system starts with the wrong ucode, leave the
4266 * counter-freezing feature permanently disabled.
4268 if (intel_counter_freezing_broken()) {
4269 pr_info("PMU counter freezing disabled due to CPU errata,"
4270 "please upgrade microcode\n");
4271 x86_pmu.counter_freezing = false;
4272 x86_pmu.handle_irq = intel_pmu_handle_irq;
4277 * enable software workaround for errata:
4282 * Only needed when HT is enabled. However, detecting
4283 * whether HT is enabled is difficult (model specific). So instead,
4284 * we enable the workaround during early boot, and verify whether
4285 * it is needed in a later initcall phase once we have valid
4286 * topology information to check if HT is actually enabled
4288 static __init void intel_ht_bug(void)
4290 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4292 x86_pmu.start_scheduling = intel_start_scheduling;
4293 x86_pmu.commit_scheduling = intel_commit_scheduling;
4294 x86_pmu.stop_scheduling = intel_stop_scheduling;
4297 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4298 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
4300 /* Haswell special events */
4301 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4302 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4303 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4304 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4305 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4306 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4307 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4308 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4309 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4310 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4311 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4312 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
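/*
 * These strings are exported through the PMU's "events" sysfs directory,
 * so the symbolic names can be used directly, e.g. (illustrative):
 *   perf stat -e cpu/tx-abort/,cpu/cycles-ct/ -- <workload>
 */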
4314 static struct attribute *hsw_events_attrs[] = {
4315 EVENT_PTR(td_slots_issued),
4316 EVENT_PTR(td_slots_retired),
4317 EVENT_PTR(td_fetch_bubbles),
4318 EVENT_PTR(td_total_slots),
4319 EVENT_PTR(td_total_slots_scale),
4320 EVENT_PTR(td_recovery_bubbles),
4321 EVENT_PTR(td_recovery_bubbles_scale),
4325 static struct attribute *hsw_mem_events_attrs[] = {
4326 EVENT_PTR(mem_ld_hsw),
4327 EVENT_PTR(mem_st_hsw),
4331 static struct attribute *hsw_tsx_events_attrs[] = {
4332 EVENT_PTR(tx_start),
4333 EVENT_PTR(tx_commit),
4334 EVENT_PTR(tx_abort),
4335 EVENT_PTR(tx_capacity),
4336 EVENT_PTR(tx_conflict),
4337 EVENT_PTR(el_start),
4338 EVENT_PTR(el_commit),
4339 EVENT_PTR(el_abort),
4340 EVENT_PTR(el_capacity),
4341 EVENT_PTR(el_conflict),
4342 EVENT_PTR(cycles_t),
4343 EVENT_PTR(cycles_ct),
4347 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
4348 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4349 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
4350 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4352 static struct attribute *icl_events_attrs[] = {
4353 EVENT_PTR(mem_ld_hsw),
4354 EVENT_PTR(mem_st_hsw),
4358 static struct attribute *icl_tsx_events_attrs[] = {
4359 EVENT_PTR(tx_start),
4360 EVENT_PTR(tx_abort),
4361 EVENT_PTR(tx_commit),
4362 EVENT_PTR(tx_capacity_read),
4363 EVENT_PTR(tx_capacity_write),
4364 EVENT_PTR(tx_conflict),
4365 EVENT_PTR(el_start),
4366 EVENT_PTR(el_abort),
4367 EVENT_PTR(el_commit),
4368 EVENT_PTR(el_capacity_read),
4369 EVENT_PTR(el_capacity_write),
4370 EVENT_PTR(el_conflict),
4371 EVENT_PTR(cycles_t),
4372 EVENT_PTR(cycles_ct),
4376 static ssize_t freeze_on_smi_show(struct device *cdev,
4377 struct device_attribute *attr,
4380 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4383 static DEFINE_MUTEX(freeze_on_smi_mutex);
4385 static ssize_t freeze_on_smi_store(struct device *cdev,
4386 struct device_attribute *attr,
4387 const char *buf, size_t count)
4392 ret = kstrtoul(buf, 0, &val);
4399 mutex_lock(&freeze_on_smi_mutex);
4401 if (x86_pmu.attr_freeze_on_smi == val)
4404 x86_pmu.attr_freeze_on_smi = val;
4407 on_each_cpu(flip_smm_bit, &val, 1);
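/* flip_smm_bit() sets or clears DEBUGCTLMSR_FREEZE_IN_SMM on each CPU. */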
4410 mutex_unlock(&freeze_on_smi_mutex);
4415 static void update_tfa_sched(void *ignored)
4417 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4420 * Check if PMC3 is used, and if so force a reschedule
4421 * of all event types in all contexts.
4423 if (test_bit(3, cpuc->active_mask))
4424 perf_pmu_resched(x86_get_pmu());
4427 static ssize_t show_sysctl_tfa(struct device *cdev,
4428 struct device_attribute *attr,
4431 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
4434 static ssize_t set_sysctl_tfa(struct device *cdev,
4435 struct device_attribute *attr,
4436 const char *buf, size_t count)
4441 ret = kstrtobool(buf, &val);
4446 if (val == allow_tsx_force_abort)
4449 allow_tsx_force_abort = val;
4452 on_each_cpu(update_tfa_sched, NULL, 1);
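/* Make every CPU re-evaluate its use of PMC3 under the new setting. */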
4459 static DEVICE_ATTR_RW(freeze_on_smi);
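/*
 * freeze_on_smi is exposed as /sys/devices/cpu/freeze_on_smi; writing 1
 * (e.g. "echo 1 > /sys/devices/cpu/freeze_on_smi") stops the counters
 * from counting while the CPU is in SMM.
 */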
4461 static ssize_t branches_show(struct device *cdev,
4462 struct device_attribute *attr,
4465 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4468 static DEVICE_ATTR_RO(branches);
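/* Exposed in the PMU "caps" sysfs group; reports the LBR stack depth. */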
4470 static struct attribute *lbr_attrs[] = {
4471 &dev_attr_branches.attr,
4475 static char pmu_name_str[30];
4477 static ssize_t pmu_name_show(struct device *cdev,
4478 struct device_attribute *attr,
4481 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4484 static DEVICE_ATTR_RO(pmu_name);
4486 static struct attribute *intel_pmu_caps_attrs[] = {
4487 &dev_attr_pmu_name.attr,
4491 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
4495 static struct attribute *intel_pmu_attrs[] = {
4496 &dev_attr_freeze_on_smi.attr,
4497 &dev_attr_allow_tsx_force_abort.attr,
4502 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4504 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
4508 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4510 return x86_pmu.pebs ? attr->mode : 0;
4514 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4516 return x86_pmu.lbr_nr ? attr->mode : 0;
4520 extra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4522 return x86_pmu.version >= 2 ? attr->mode : 0;
4526 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4528 if (attr == &dev_attr_allow_tsx_force_abort.attr)
4529 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
4534 static struct attribute_group group_events_td = {
4538 static struct attribute_group group_events_mem = {
4540 .is_visible = pebs_is_visible,
4543 static struct attribute_group group_events_tsx = {
4545 .is_visible = tsx_is_visible,
4548 static struct attribute_group group_caps_gen = {
4550 .attrs = intel_pmu_caps_attrs,
4553 static struct attribute_group group_caps_lbr = {
4556 .is_visible = lbr_is_visible,
4559 static struct attribute_group group_format_extra = {
4561 .is_visible = extra_is_visible,
4564 static struct attribute_group group_format_extra_skl = {
4566 .is_visible = extra_is_visible,
4569 static struct attribute_group group_default = {
4570 .attrs = intel_pmu_attrs,
4571 .is_visible = default_is_visible,
4574 static const struct attribute_group *attr_update[] = {
4580 &group_format_extra,
4581 &group_format_extra_skl,
4586 static struct attribute *empty_attrs;
4588 __init int intel_pmu_init(void)
4590 struct attribute **extra_skl_attr = &empty_attrs;
4591 struct attribute **extra_attr = &empty_attrs;
4592 struct attribute **td_attr = &empty_attrs;
4593 struct attribute **mem_attr = &empty_attrs;
4594 struct attribute **tsx_attr = &empty_attrs;
4595 union cpuid10_edx edx;
4596 union cpuid10_eax eax;
4597 union cpuid10_ebx ebx;
4598 struct event_constraint *c;
4599 unsigned int unused;
4600 struct extra_reg *er;
4605 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4606 switch (boot_cpu_data.x86) {
4608 return p6_pmu_init();
4610 return knc_pmu_init();
4612 return p4_pmu_init();
4618 * Check whether the Architectural PerfMon supports
4619 * Branch Misses Retired hw_event or not.
4621 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
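/*
 * CPUID leaf 0xA: EAX holds the PMU version, counter count and counter
 * width, EBX is a bit vector of architectural events that are NOT
 * available, and EDX describes the fixed-function counters.
 */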
4622 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4625 version = eax.split.version_id;
4629 x86_pmu = intel_pmu;
4631 x86_pmu.version = version;
4632 x86_pmu.num_counters = eax.split.num_counters;
4633 x86_pmu.cntval_bits = eax.split.bit_width;
4634 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4636 x86_pmu.events_maskl = ebx.full;
4637 x86_pmu.events_mask_len = eax.split.mask_length;
4639 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4642 * Quirk: v2 perfmon does not report fixed-purpose events, so
4643 * assume at least 3 events, when not running in a hypervisor:
4646 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4648 x86_pmu.num_counters_fixed =
4649 max((int)edx.split.num_counters_fixed, assume);
4653 x86_pmu.counter_freezing = !disable_counter_freezing;
4655 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4658 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4659 x86_pmu.intel_cap.capabilities = capabilities;
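/* IA32_PERF_CAPABILITIES advertises the LBR format, PEBS capabilities
 * and full-width counter writes. */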
4662 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
4663 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
4664 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
4667 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
4668 intel_pmu_arch_lbr_init();
4672 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4675 * Install the hw-cache-events table:
4677 switch (boot_cpu_data.x86_model) {
4678 case INTEL_FAM6_CORE_YONAH:
4679 pr_cont("Core events, ");
4683 case INTEL_FAM6_CORE2_MEROM:
4684 x86_add_quirk(intel_clovertown_quirk);
4687 case INTEL_FAM6_CORE2_MEROM_L:
4688 case INTEL_FAM6_CORE2_PENRYN:
4689 case INTEL_FAM6_CORE2_DUNNINGTON:
4690 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4691 sizeof(hw_cache_event_ids));
4693 intel_pmu_lbr_init_core();
4695 x86_pmu.event_constraints = intel_core2_event_constraints;
4696 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4697 pr_cont("Core2 events, ");
4701 case INTEL_FAM6_NEHALEM:
4702 case INTEL_FAM6_NEHALEM_EP:
4703 case INTEL_FAM6_NEHALEM_EX:
4704 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4705 sizeof(hw_cache_event_ids));
4706 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4707 sizeof(hw_cache_extra_regs));
4709 intel_pmu_lbr_init_nhm();
4711 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4712 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4713 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4714 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4715 x86_pmu.limit_period = nhm_limit_period;
4717 mem_attr = nhm_mem_events_attrs;
4719 /* UOPS_ISSUED.STALLED_CYCLES */
4720 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4721 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4722 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4723 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4724 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
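/*
 * inv=1,cmask=1 counts cycles in which the event fired fewer than once,
 * i.e. the stalled cycles.
 */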
4726 intel_pmu_pebs_data_source_nhm();
4727 x86_add_quirk(intel_nehalem_quirk);
4728 x86_pmu.pebs_no_tlb = 1;
4729 extra_attr = nhm_format_attr;
4731 pr_cont("Nehalem events, ");
4735 case INTEL_FAM6_ATOM_BONNELL:
4736 case INTEL_FAM6_ATOM_BONNELL_MID:
4737 case INTEL_FAM6_ATOM_SALTWELL:
4738 case INTEL_FAM6_ATOM_SALTWELL_MID:
4739 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4740 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4741 sizeof(hw_cache_event_ids));
4743 intel_pmu_lbr_init_atom();
4745 x86_pmu.event_constraints = intel_gen_event_constraints;
4746 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4747 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4748 pr_cont("Atom events, ");
4752 case INTEL_FAM6_ATOM_SILVERMONT:
4753 case INTEL_FAM6_ATOM_SILVERMONT_D:
4754 case INTEL_FAM6_ATOM_SILVERMONT_MID:
4755 case INTEL_FAM6_ATOM_AIRMONT:
4756 case INTEL_FAM6_ATOM_AIRMONT_MID:
4757 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4758 sizeof(hw_cache_event_ids));
4759 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4760 sizeof(hw_cache_extra_regs));
4762 intel_pmu_lbr_init_slm();
4764 x86_pmu.event_constraints = intel_slm_event_constraints;
4765 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4766 x86_pmu.extra_regs = intel_slm_extra_regs;
4767 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4768 td_attr = slm_events_attrs;
4769 extra_attr = slm_format_attr;
4770 pr_cont("Silvermont events, ");
4771 name = "silvermont";
4774 case INTEL_FAM6_ATOM_GOLDMONT:
4775 case INTEL_FAM6_ATOM_GOLDMONT_D:
4776 x86_add_quirk(intel_counter_freezing_quirk);
4777 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4778 sizeof(hw_cache_event_ids));
4779 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4780 sizeof(hw_cache_extra_regs));
4782 intel_pmu_lbr_init_skl();
4784 x86_pmu.event_constraints = intel_slm_event_constraints;
4785 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4786 x86_pmu.extra_regs = intel_glm_extra_regs;
4788 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4789 * for precise cycles.
4790 * :pp is identical to :ppp
4792 x86_pmu.pebs_aliases = NULL;
4793 x86_pmu.pebs_prec_dist = true;
4794 x86_pmu.lbr_pt_coexist = true;
4795 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4796 td_attr = glm_events_attrs;
4797 extra_attr = slm_format_attr;
4798 pr_cont("Goldmont events, ");
4802 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4803 x86_add_quirk(intel_counter_freezing_quirk);
4804 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4805 sizeof(hw_cache_event_ids));
4806 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4807 sizeof(hw_cache_extra_regs));
4809 intel_pmu_lbr_init_skl();
4811 x86_pmu.event_constraints = intel_slm_event_constraints;
4812 x86_pmu.extra_regs = intel_glm_extra_regs;
4814 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4815 * for precise cycles.
4817 x86_pmu.pebs_aliases = NULL;
4818 x86_pmu.pebs_prec_dist = true;
4819 x86_pmu.lbr_pt_coexist = true;
4820 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4821 x86_pmu.flags |= PMU_FL_PEBS_ALL;
4822 x86_pmu.get_event_constraints = glp_get_event_constraints;
4823 td_attr = glm_events_attrs;
4824 /* Goldmont Plus has a 4-wide pipeline */
4825 event_attr_td_total_slots_scale_glm.event_str = "4";
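/* Topdown total slots are estimated as unhalted cycles times the issue
 * width, hence the scale of 4 on this 4-wide core. */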
4826 extra_attr = slm_format_attr;
4827 pr_cont("Goldmont plus events, ");
4828 name = "goldmont_plus";
4831 case INTEL_FAM6_ATOM_TREMONT_D:
4832 case INTEL_FAM6_ATOM_TREMONT:
4833 x86_pmu.late_ack = true;
4834 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4835 sizeof(hw_cache_event_ids));
4836 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
4837 sizeof(hw_cache_extra_regs));
4838 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
4840 intel_pmu_lbr_init_skl();
4842 x86_pmu.event_constraints = intel_slm_event_constraints;
4843 x86_pmu.extra_regs = intel_tnt_extra_regs;
4845 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4846 * for precise cycles.
4848 x86_pmu.pebs_aliases = NULL;
4849 x86_pmu.pebs_prec_dist = true;
4850 x86_pmu.lbr_pt_coexist = true;
4851 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4852 x86_pmu.get_event_constraints = tnt_get_event_constraints;
4853 extra_attr = slm_format_attr;
4854 pr_cont("Tremont events, ");
4858 case INTEL_FAM6_WESTMERE:
4859 case INTEL_FAM6_WESTMERE_EP:
4860 case INTEL_FAM6_WESTMERE_EX:
4861 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4862 sizeof(hw_cache_event_ids));
4863 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4864 sizeof(hw_cache_extra_regs));
4866 intel_pmu_lbr_init_nhm();
4868 x86_pmu.event_constraints = intel_westmere_event_constraints;
4869 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4870 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4871 x86_pmu.extra_regs = intel_westmere_extra_regs;
4872 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4874 mem_attr = nhm_mem_events_attrs;
4876 /* UOPS_ISSUED.STALLED_CYCLES */
4877 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4878 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4879 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4880 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4881 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4883 intel_pmu_pebs_data_source_nhm();
4884 extra_attr = nhm_format_attr;
4885 pr_cont("Westmere events, ");
4889 case INTEL_FAM6_SANDYBRIDGE:
4890 case INTEL_FAM6_SANDYBRIDGE_X:
4891 x86_add_quirk(intel_sandybridge_quirk);
4892 x86_add_quirk(intel_ht_bug);
4893 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4894 sizeof(hw_cache_event_ids));
4895 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4896 sizeof(hw_cache_extra_regs));
4898 intel_pmu_lbr_init_snb();
4900 x86_pmu.event_constraints = intel_snb_event_constraints;
4901 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4902 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4903 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4904 x86_pmu.extra_regs = intel_snbep_extra_regs;
4906 x86_pmu.extra_regs = intel_snb_extra_regs;
4909 /* all extra regs are per-cpu when HT is on */
4910 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4911 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4913 td_attr = snb_events_attrs;
4914 mem_attr = snb_mem_events_attrs;
4916 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4917 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4918 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4919 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
4920 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4921 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4923 extra_attr = nhm_format_attr;
4925 pr_cont("SandyBridge events, ");
4926 name = "sandybridge";
4929 case INTEL_FAM6_IVYBRIDGE:
4930 case INTEL_FAM6_IVYBRIDGE_X:
4931 x86_add_quirk(intel_ht_bug);
4932 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4933 sizeof(hw_cache_event_ids));
4934 /* dTLB-load-misses on IVB differs from SNB */
4935 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4937 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4938 sizeof(hw_cache_extra_regs));
4940 intel_pmu_lbr_init_snb();
4942 x86_pmu.event_constraints = intel_ivb_event_constraints;
4943 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4944 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4945 x86_pmu.pebs_prec_dist = true;
4946 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4947 x86_pmu.extra_regs = intel_snbep_extra_regs;
4949 x86_pmu.extra_regs = intel_snb_extra_regs;
4950 /* all extra regs are per-cpu when HT is on */
4951 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4952 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4954 td_attr = snb_events_attrs;
4955 mem_attr = snb_mem_events_attrs;
4957 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4958 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4959 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4961 extra_attr = nhm_format_attr;
4963 pr_cont("IvyBridge events, ");
4968 case INTEL_FAM6_HASWELL:
4969 case INTEL_FAM6_HASWELL_X:
4970 case INTEL_FAM6_HASWELL_L:
4971 case INTEL_FAM6_HASWELL_G:
4972 x86_add_quirk(intel_ht_bug);
4973 x86_add_quirk(intel_pebs_isolation_quirk);
4974 x86_pmu.late_ack = true;
4975 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4976 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4978 intel_pmu_lbr_init_hsw();
4980 x86_pmu.event_constraints = intel_hsw_event_constraints;
4981 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4982 x86_pmu.extra_regs = intel_snbep_extra_regs;
4983 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4984 x86_pmu.pebs_prec_dist = true;
4985 /* all extra regs are per-cpu when HT is on */
4986 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4987 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4989 x86_pmu.hw_config = hsw_hw_config;
4990 x86_pmu.get_event_constraints = hsw_get_event_constraints;
4991 x86_pmu.lbr_double_abort = true;
4992 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4993 hsw_format_attr : nhm_format_attr;
4994 td_attr = hsw_events_attrs;
4995 mem_attr = hsw_mem_events_attrs;
4996 tsx_attr = hsw_tsx_events_attrs;
4997 pr_cont("Haswell events, ");
5001 case INTEL_FAM6_BROADWELL:
5002 case INTEL_FAM6_BROADWELL_D:
5003 case INTEL_FAM6_BROADWELL_G:
5004 case INTEL_FAM6_BROADWELL_X:
5005 x86_add_quirk(intel_pebs_isolation_quirk);
5006 x86_pmu.late_ack = true;
5007 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5008 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5010 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
5011 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
5012 BDW_L3_MISS|HSW_SNOOP_DRAM;
5013 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
5015 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
5016 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5017 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
5018 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5020 intel_pmu_lbr_init_hsw();
5022 x86_pmu.event_constraints = intel_bdw_event_constraints;
5023 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
5024 x86_pmu.extra_regs = intel_snbep_extra_regs;
5025 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5026 x86_pmu.pebs_prec_dist = true;
5027 /* all extra regs are per-cpu when HT is on */
5028 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5029 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5031 x86_pmu.hw_config = hsw_hw_config;
5032 x86_pmu.get_event_constraints = hsw_get_event_constraints;
5033 x86_pmu.limit_period = bdw_limit_period;
5034 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5035 hsw_format_attr : nhm_format_attr;
5036 td_attr = hsw_events_attrs;
5037 mem_attr = hsw_mem_events_attrs;
5038 tsx_attr = hsw_tsx_events_attrs;
5039 pr_cont("Broadwell events, ");
5043 case INTEL_FAM6_XEON_PHI_KNL:
5044 case INTEL_FAM6_XEON_PHI_KNM:
5045 memcpy(hw_cache_event_ids,
5046 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5047 memcpy(hw_cache_extra_regs,
5048 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5049 intel_pmu_lbr_init_knl();
5051 x86_pmu.event_constraints = intel_slm_event_constraints;
5052 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5053 x86_pmu.extra_regs = intel_knl_extra_regs;
5055 /* all extra regs are per-cpu when HT is on */
5056 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5057 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5058 extra_attr = slm_format_attr;
5059 pr_cont("Knights Landing/Mill events, ");
5060 name = "knights-landing";
5063 case INTEL_FAM6_SKYLAKE_X:
5066 case INTEL_FAM6_SKYLAKE_L:
5067 case INTEL_FAM6_SKYLAKE:
5068 case INTEL_FAM6_KABYLAKE_L:
5069 case INTEL_FAM6_KABYLAKE:
5070 case INTEL_FAM6_COMETLAKE_L:
5071 case INTEL_FAM6_COMETLAKE:
5072 x86_add_quirk(intel_pebs_isolation_quirk);
5073 x86_pmu.late_ack = true;
5074 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5075 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5076 intel_pmu_lbr_init_skl();
5078 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
5079 event_attr_td_recovery_bubbles.event_str_noht =
5080 "event=0xd,umask=0x1,cmask=1";
5081 event_attr_td_recovery_bubbles.event_str_ht =
5082 "event=0xd,umask=0x1,cmask=1,any=1";
5084 x86_pmu.event_constraints = intel_skl_event_constraints;
5085 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
5086 x86_pmu.extra_regs = intel_skl_extra_regs;
5087 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
5088 x86_pmu.pebs_prec_dist = true;
5089 /* all extra regs are per-cpu when HT is on */
5090 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5091 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5093 x86_pmu.hw_config = hsw_hw_config;
5094 x86_pmu.get_event_constraints = hsw_get_event_constraints;
5095 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5096 hsw_format_attr : nhm_format_attr;
5097 extra_skl_attr = skl_format_attr;
5098 td_attr = hsw_events_attrs;
5099 mem_attr = hsw_mem_events_attrs;
5100 tsx_attr = hsw_tsx_events_attrs;
5101 intel_pmu_pebs_data_source_skl(pmem);
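/*
 * With the TSX force-abort microcode, PMC3 and RTM transactions cannot
 * be used at the same time: by default perf leaves PMC3 alone, and the
 * allow_tsx_force_abort knob lets it reclaim PMC3 at the cost of
 * aborting all TSX transactions.
 */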
5103 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
5104 x86_pmu.flags |= PMU_FL_TFA;
5105 x86_pmu.get_event_constraints = tfa_get_event_constraints;
5106 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
5107 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
5110 pr_cont("Skylake events, ");
5114 case INTEL_FAM6_ICELAKE_X:
5115 case INTEL_FAM6_ICELAKE_D:
5118 case INTEL_FAM6_ICELAKE_L:
5119 case INTEL_FAM6_ICELAKE:
5120 case INTEL_FAM6_TIGERLAKE_L:
5121 case INTEL_FAM6_TIGERLAKE:
5122 x86_pmu.late_ack = true;
5123 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5124 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5125 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
5126 intel_pmu_lbr_init_skl();
5128 x86_pmu.event_constraints = intel_icl_event_constraints;
5129 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
5130 x86_pmu.extra_regs = intel_icl_extra_regs;
5131 x86_pmu.pebs_aliases = NULL;
5132 x86_pmu.pebs_prec_dist = true;
5133 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5134 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5136 x86_pmu.hw_config = hsw_hw_config;
5137 x86_pmu.get_event_constraints = icl_get_event_constraints;
5138 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5139 hsw_format_attr : nhm_format_attr;
5140 extra_skl_attr = skl_format_attr;
5141 mem_attr = icl_events_attrs;
5142 tsx_attr = icl_tsx_events_attrs;
5143 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
5144 x86_pmu.lbr_pt_coexist = true;
5145 intel_pmu_pebs_data_source_skl(pmem);
5146 pr_cont("Icelake events, ");
5151 switch (x86_pmu.version) {
5153 x86_pmu.event_constraints = intel_v1_event_constraints;
5154 pr_cont("generic architected perfmon v1, ");
5155 name = "generic_arch_v1";
5159 * default constraints for v2 and up
5161 x86_pmu.event_constraints = intel_gen_event_constraints;
5162 pr_cont("generic architected perfmon, ");
5163 name = "generic_arch_v2+";
5168 snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
5171 group_events_td.attrs = td_attr;
5172 group_events_mem.attrs = mem_attr;
5173 group_events_tsx.attrs = tsx_attr;
5174 group_format_extra.attrs = extra_attr;
5175 group_format_extra_skl.attrs = extra_skl_attr;
5177 x86_pmu.attr_update = attr_update;
5179 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
5180 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5181 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
5182 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
5184 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
5186 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5187 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5188 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
5189 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
5192 x86_pmu.intel_ctrl |=
5193 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
5195 if (x86_pmu.event_constraints) {
5197 * event on fixed counter2 (REF_CYCLES) only works on this
5198 * counter, so do not extend mask to generic counters
5200 for_each_event_constraint(c, x86_pmu.event_constraints) {
5201 if (c->cmask == FIXED_EVENT_FLAGS
5202 && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
5203 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
5206 ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
5207 c->weight = hweight64(c->idxmsk64);
5212 * Accessing LBR MSRs may cause a #GP under certain circumstances,
5213 * e.g. KVM doesn't support the LBR MSRs.
5214 * Check all LBR MSRs here.
5215 * Disable LBR access if any LBR MSR cannot be accessed.
5217 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
5219 for (i = 0; i < x86_pmu.lbr_nr; i++) {
5220 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
5221 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
5226 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
5229 * Accessing extra MSRs may cause a #GP under certain circumstances,
5230 * e.g. KVM doesn't support offcore events.
5231 * Check all extra_regs here.
5233 if (x86_pmu.extra_regs) {
5234 for (er = x86_pmu.extra_regs; er->msr; er++) {
5235 er->extra_msr_access = check_msr(er->msr, 0x11UL);
5236 /* Disable LBR select mapping */
5237 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5238 x86_pmu.lbr_sel_map = NULL;
5242 /* Support full width counters using alternative MSR range */
5243 if (x86_pmu.intel_cap.full_width_write) {
5244 x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
5245 x86_pmu.perfctr = MSR_IA32_PMC0;
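/*
 * Writes to the IA32_PMCx aliases take the full counter width, whereas
 * the legacy IA32_PERFCTRx MSRs only accept a 32-bit, sign-extended
 * value.
 */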
5246 pr_cont("full-width counters, ");
5250 * For arch perfmon v4, use counter freezing to avoid
5251 * several MSR accesses in the PMI.
5253 if (x86_pmu.counter_freezing)
5254 x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
5260 * HT bug: phase 2 init
5261 * Called once we have valid topology information to check
5262 * whether or not HT is enabled
5263 * If HT is off, then we disable the workaround
5265 static __init int fixup_ht_bug(void)
5269 * problem not present on this CPU model, nothing to do
5271 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
5274 if (topology_max_smt_threads() > 1) {
5275 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
5281 hardlockup_detector_perf_stop();
5283 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
5285 x86_pmu.start_scheduling = NULL;
5286 x86_pmu.commit_scheduling = NULL;
5287 x86_pmu.stop_scheduling = NULL;
5289 hardlockup_detector_perf_restart();
5291 for_each_online_cpu(c)
5292 free_excl_cntrs(&per_cpu(cpu_hw_events, c));
5295 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
5298 subsys_initcall(fixup_ht_bug);