1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Per core/cpu state
4 *
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
7 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
18 #include <asm/cpufeature.h>
19 #include <asm/hardirq.h>
20 #include <asm/intel-family.h>
21 #include <asm/intel_pt.h>
23 #include <asm/cpu_device_id.h>
25 #include "../perf_event.h"
27 /*
28 * Intel PerfMon, used on Core and later.
29 */
30 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
32 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
33 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
34 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
35 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
36 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
37 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
38 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
39 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
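/*
 * For reference: each entry above is the architectural eventsel/umask pair,
 * with bits 7:0 holding the event select and bits 15:8 the unit mask (e.g.
 * 0x412e is event 0x2e, umask 0x41 == LONGEST_LAT_CACHE.MISS). 0x0300 is a
 * pseudo-encoding that the driver redirects to the fixed reference-cycles
 * counter rather than to a general-purpose counter.
 */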
42 static struct event_constraint intel_core_event_constraints[] __read_mostly =
44 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
45 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
46 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
47 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
48 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
49 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
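/*
 * In these tables the second argument of INTEL_EVENT_CONSTRAINT() is a
 * bitmask of the general-purpose counters the event may be scheduled on:
 * 0x1 means counter 0 only, 0x2 counter 1 only, 0x3 counters 0-1, and so on.
 */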
53 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
55 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
56 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
57 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
58 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
59 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
60 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
61 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
62 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
63 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
64 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
65 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
66 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
67 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
71 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
73 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
74 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
75 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
76 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
77 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
78 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
79 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
80 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
81 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
82 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
83 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
87 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
89 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
90 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
91 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
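/*
 * An extra_reg entry ties an event encoding (0x01b7 == OFFCORE_RESPONSE_0
 * here) to the MSR that carries its additional configuration; the third
 * argument is the mask of bits that are valid in that MSR on this CPU, and
 * config1 bits outside that mask are rejected when the event is created.
 */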
95 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
97 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
98 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
99 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
100 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
101 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
102 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
103 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
107 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
109 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
110 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
111 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
112 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
113 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
114 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
116 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
117 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
118 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
119 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
120 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
123 * When HT is off these events can only run on the bottom 4 counters
124 * When HT is on, they are impacted by the HT bug and require EXCL access
126 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
127 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
128 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
134 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
136 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
137 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
138 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
139 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
140 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
141 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
142 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
143 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
144 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
151 * When HT is off these events can only run on the bottom 4 counters
152 * When HT is on, they are impacted by the HT bug and require EXCL access
154 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
155 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
156 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
162 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
164 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
165 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
166 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
167 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
171 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
176 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
178 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
179 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
180 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
184 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
186 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
187 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
188 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
192 static struct event_constraint intel_skl_event_constraints[] = {
193 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
194 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
195 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
196 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
199 * when HT is off, these can only run on the bottom 4 counters
201 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
202 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
203 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
204 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
205 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
210 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
211 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
212 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
216 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
217 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
218 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
219 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
220 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
224 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
225 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
226 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
227 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
228 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
232 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
234 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
235 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
236 /*
237 * Note the low 8 bits of the eventsel code are not a continuous field;
238 * they contain some bits that #GP when set. These are masked out.
239 */
240 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
244 static struct event_constraint intel_icl_event_constraints[] = {
245 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
246 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */
247 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
248 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
249 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
250 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
251 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
252 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
253 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
254 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
255 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
256 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
257 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
258 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
259 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
260 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
261 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
262 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
263 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
264 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
265 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
266 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
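/*
 * The FIXED_EVENT_CONSTRAINT(0x0400, 3) entry above pins the TOPDOWN.SLOTS
 * pseudo event to fixed counter 3, and the METRIC_EVENT_CONSTRAINT() entries
 * map the topdown-* metric events to the corresponding field of the
 * PERF_METRICS MSR rather than to a real counter.
 */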
270 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
271 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
272 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
273 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
274 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
278 static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
279 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
280 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
281 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
282 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
283 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
284 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
288 static struct event_constraint intel_spr_event_constraints[] = {
289 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
290 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* INST_RETIRED.PREC_DIST */
291 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
292 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
293 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
294 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
295 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
296 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
297 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
298 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
299 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
300 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
301 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
303 INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
304 INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
305 /*
306 * Generally event codes < 0x90 are restricted to counters 0-3.
307 * The 0x2e and 0x3c events are exceptions and have no restriction.
308 */
309 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
311 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
312 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
313 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
314 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
315 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
316 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
317 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
318 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
319 /*
320 * Generally event codes >= 0x90 are likely to have no restrictions.
321 * The exceptions are defined above.
322 */
323 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
329 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
330 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
331 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
333 static struct attribute *nhm_mem_events_attrs[] = {
334 EVENT_PTR(mem_ld_nhm),
338 /*
339 * topdown events for Intel Core CPUs.
340 *
341 * The events are all counted in slots, where a slot is a free issue slot
342 * in the 4-wide pipeline. Some events are already reported in slots; for
343 * cycle-based events we multiply by the pipeline width (4).
344 *
345 * With Hyper-Threading on, topdown metrics are either summed or averaged
346 * between the threads of a core: (count_t0 + count_t1).
347 *
348 * For the average case the metric is always scaled to pipeline width,
349 * so we use a factor of 2: (count_t0 + count_t1) / 2 * 4.
350 */
352 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
353 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
354 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
355 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
356 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
357 "event=0xe,umask=0x1"); /* uops_issued.any */
358 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
359 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
360 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
361 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
362 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
363 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
364 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
365 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
366 "4", "2");
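/*
 * From the attributes above, the classic level-1 topdown metrics can be
 * derived roughly as:
 *
 *   frontend_bound  = fetch-bubbles / total-slots
 *   bad_speculation = (slots-issued - slots-retired + recovery-bubbles) / total-slots
 *   retiring        = slots-retired / total-slots
 *   backend_bound   = 1 - frontend_bound - bad_speculation - retiring
 */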
368 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
369 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
370 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
371 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
372 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
373 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
374 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
375 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
376 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
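/*
 * The event=0x00,umask=0x8x strings above are not real hardware encodings;
 * they are pseudo-events that the driver recognizes and serves from the
 * fixed SLOTS counter plus the PERF_METRICS MSR on Ice Lake and later.
 */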
378 static struct attribute *snb_events_attrs[] = {
379 EVENT_PTR(td_slots_issued),
380 EVENT_PTR(td_slots_retired),
381 EVENT_PTR(td_fetch_bubbles),
382 EVENT_PTR(td_total_slots),
383 EVENT_PTR(td_total_slots_scale),
384 EVENT_PTR(td_recovery_bubbles),
385 EVENT_PTR(td_recovery_bubbles_scale),
389 static struct attribute *snb_mem_events_attrs[] = {
390 EVENT_PTR(mem_ld_snb),
391 EVENT_PTR(mem_st_snb),
395 static struct event_constraint intel_hsw_event_constraints[] = {
396 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
397 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
398 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
399 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
400 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
401 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
402 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
403 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
404 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
405 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
406 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
407 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
410 * When HT is off these events can only run on the bottom 4 counters
411 * When HT is on, they are impacted by the HT bug and require EXCL access
413 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
414 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
415 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
416 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
421 static struct event_constraint intel_bdw_event_constraints[] = {
422 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
423 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
424 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
425 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
426 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
428 * when HT is off, these can only run on the bottom 4 counters
430 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
431 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
432 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
433 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
437 static u64 intel_pmu_event_map(int hw_event)
438 {
439 return intel_perfmon_event_map[hw_event];
440 }
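/*
 * The *_hw_cache_event_ids tables below are indexed by
 * [cache type][operation][result]. A value of 0 means the combination is not
 * supported, -1 means it is invalid, and 0x01b7 selects OFFCORE_RESPONSE,
 * whose detailed request/response selection comes from the matching
 * *_hw_cache_extra_regs table.
 */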
442 static __initconst const u64 spr_hw_cache_event_ids
443 [PERF_COUNT_HW_CACHE_MAX]
444 [PERF_COUNT_HW_CACHE_OP_MAX]
445 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
449 [ C(RESULT_ACCESS) ] = 0x81d0,
450 [ C(RESULT_MISS) ] = 0xe124,
453 [ C(RESULT_ACCESS) ] = 0x82d0,
458 [ C(RESULT_MISS) ] = 0xe424,
461 [ C(RESULT_ACCESS) ] = -1,
462 [ C(RESULT_MISS) ] = -1,
467 [ C(RESULT_ACCESS) ] = 0x12a,
468 [ C(RESULT_MISS) ] = 0x12a,
471 [ C(RESULT_ACCESS) ] = 0x12a,
472 [ C(RESULT_MISS) ] = 0x12a,
477 [ C(RESULT_ACCESS) ] = 0x81d0,
478 [ C(RESULT_MISS) ] = 0xe12,
481 [ C(RESULT_ACCESS) ] = 0x82d0,
482 [ C(RESULT_MISS) ] = 0xe13,
487 [ C(RESULT_ACCESS) ] = -1,
488 [ C(RESULT_MISS) ] = 0xe11,
491 [ C(RESULT_ACCESS) ] = -1,
492 [ C(RESULT_MISS) ] = -1,
494 [ C(OP_PREFETCH) ] = {
495 [ C(RESULT_ACCESS) ] = -1,
496 [ C(RESULT_MISS) ] = -1,
501 [ C(RESULT_ACCESS) ] = 0x4c4,
502 [ C(RESULT_MISS) ] = 0x4c5,
505 [ C(RESULT_ACCESS) ] = -1,
506 [ C(RESULT_MISS) ] = -1,
508 [ C(OP_PREFETCH) ] = {
509 [ C(RESULT_ACCESS) ] = -1,
510 [ C(RESULT_MISS) ] = -1,
515 [ C(RESULT_ACCESS) ] = 0x12a,
516 [ C(RESULT_MISS) ] = 0x12a,
521 static __initconst const u64 spr_hw_cache_extra_regs
522 [PERF_COUNT_HW_CACHE_MAX]
523 [PERF_COUNT_HW_CACHE_OP_MAX]
524 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
528 [ C(RESULT_ACCESS) ] = 0x10001,
529 [ C(RESULT_MISS) ] = 0x3fbfc00001,
532 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
533 [ C(RESULT_MISS) ] = 0x3f3fc00002,
538 [ C(RESULT_ACCESS) ] = 0x10c000001,
539 [ C(RESULT_MISS) ] = 0x3fb3000001,
545 * Notes on the events:
546 * - data reads do not include code reads (comparable to earlier tables)
547 * - data counts include speculative execution (except L1 write, dtlb, bpu)
548 * - remote node access includes remote memory, remote cache, remote mmio.
549 * - prefetches are not included in the counts.
550 * - icache miss does not include decoded icache
553 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
554 #define SKL_DEMAND_RFO BIT_ULL(1)
555 #define SKL_ANY_RESPONSE BIT_ULL(16)
556 #define SKL_SUPPLIER_NONE BIT_ULL(17)
557 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
558 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
559 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
560 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
561 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
562 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
563 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
564 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
565 #define SKL_SPL_HIT BIT_ULL(30)
566 #define SKL_SNOOP_NONE BIT_ULL(31)
567 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
568 #define SKL_SNOOP_MISS BIT_ULL(33)
569 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
570 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
571 #define SKL_SNOOP_HITM BIT_ULL(36)
572 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
573 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
574 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
575 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
576 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
577 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
578 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
579 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
580 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
581 SKL_SNOOP_HITM|SKL_SPL_HIT)
582 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
583 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
584 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
585 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
586 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
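/*
 * An OFFCORE_RESPONSE value is built from a request type, a response/supplier
 * part and a snoop part; e.g. an LL read miss below is encoded as
 * SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE.
 */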
588 static __initconst const u64 skl_hw_cache_event_ids
589 [PERF_COUNT_HW_CACHE_MAX]
590 [PERF_COUNT_HW_CACHE_OP_MAX]
591 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
595 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
596 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
599 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
600 [ C(RESULT_MISS) ] = 0x0,
602 [ C(OP_PREFETCH) ] = {
603 [ C(RESULT_ACCESS) ] = 0x0,
604 [ C(RESULT_MISS) ] = 0x0,
609 [ C(RESULT_ACCESS) ] = 0x0,
610 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
613 [ C(RESULT_ACCESS) ] = -1,
614 [ C(RESULT_MISS) ] = -1,
616 [ C(OP_PREFETCH) ] = {
617 [ C(RESULT_ACCESS) ] = 0x0,
618 [ C(RESULT_MISS) ] = 0x0,
623 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
624 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
627 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
628 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
630 [ C(OP_PREFETCH) ] = {
631 [ C(RESULT_ACCESS) ] = 0x0,
632 [ C(RESULT_MISS) ] = 0x0,
637 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
638 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
641 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
642 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
644 [ C(OP_PREFETCH) ] = {
645 [ C(RESULT_ACCESS) ] = 0x0,
646 [ C(RESULT_MISS) ] = 0x0,
651 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
652 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
655 [ C(RESULT_ACCESS) ] = -1,
656 [ C(RESULT_MISS) ] = -1,
658 [ C(OP_PREFETCH) ] = {
659 [ C(RESULT_ACCESS) ] = -1,
660 [ C(RESULT_MISS) ] = -1,
665 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
666 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
669 [ C(RESULT_ACCESS) ] = -1,
670 [ C(RESULT_MISS) ] = -1,
672 [ C(OP_PREFETCH) ] = {
673 [ C(RESULT_ACCESS) ] = -1,
674 [ C(RESULT_MISS) ] = -1,
679 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
680 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
683 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
684 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
686 [ C(OP_PREFETCH) ] = {
687 [ C(RESULT_ACCESS) ] = 0x0,
688 [ C(RESULT_MISS) ] = 0x0,
693 static __initconst const u64 skl_hw_cache_extra_regs
694 [PERF_COUNT_HW_CACHE_MAX]
695 [PERF_COUNT_HW_CACHE_OP_MAX]
696 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
700 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
701 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
702 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
703 SKL_L3_MISS|SKL_ANY_SNOOP|
704 SKL_SUPPLIER_NONE,
707 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
708 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
709 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
710 SKL_L3_MISS|SKL_ANY_SNOOP|
711 SKL_SUPPLIER_NONE,
713 [ C(OP_PREFETCH) ] = {
714 [ C(RESULT_ACCESS) ] = 0x0,
715 [ C(RESULT_MISS) ] = 0x0,
720 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
721 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
722 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
723 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
726 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
727 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
728 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
729 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
731 [ C(OP_PREFETCH) ] = {
732 [ C(RESULT_ACCESS) ] = 0x0,
733 [ C(RESULT_MISS) ] = 0x0,
738 #define SNB_DMND_DATA_RD (1ULL << 0)
739 #define SNB_DMND_RFO (1ULL << 1)
740 #define SNB_DMND_IFETCH (1ULL << 2)
741 #define SNB_DMND_WB (1ULL << 3)
742 #define SNB_PF_DATA_RD (1ULL << 4)
743 #define SNB_PF_RFO (1ULL << 5)
744 #define SNB_PF_IFETCH (1ULL << 6)
745 #define SNB_LLC_DATA_RD (1ULL << 7)
746 #define SNB_LLC_RFO (1ULL << 8)
747 #define SNB_LLC_IFETCH (1ULL << 9)
748 #define SNB_BUS_LOCKS (1ULL << 10)
749 #define SNB_STRM_ST (1ULL << 11)
750 #define SNB_OTHER (1ULL << 15)
751 #define SNB_RESP_ANY (1ULL << 16)
752 #define SNB_NO_SUPP (1ULL << 17)
753 #define SNB_LLC_HITM (1ULL << 18)
754 #define SNB_LLC_HITE (1ULL << 19)
755 #define SNB_LLC_HITS (1ULL << 20)
756 #define SNB_LLC_HITF (1ULL << 21)
757 #define SNB_LOCAL (1ULL << 22)
758 #define SNB_REMOTE (0xffULL << 23)
759 #define SNB_SNP_NONE (1ULL << 31)
760 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
761 #define SNB_SNP_MISS (1ULL << 33)
762 #define SNB_NO_FWD (1ULL << 34)
763 #define SNB_SNP_FWD (1ULL << 35)
764 #define SNB_HITM (1ULL << 36)
765 #define SNB_NON_DRAM (1ULL << 37)
767 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
768 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
769 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
771 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
772 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
773 SNB_HITM)
775 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
776 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
778 #define SNB_L3_ACCESS SNB_RESP_ANY
779 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
781 static __initconst const u64 snb_hw_cache_extra_regs
782 [PERF_COUNT_HW_CACHE_MAX]
783 [PERF_COUNT_HW_CACHE_OP_MAX]
784 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
788 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
789 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
792 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
793 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
795 [ C(OP_PREFETCH) ] = {
796 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
797 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
802 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
803 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
806 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
807 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
809 [ C(OP_PREFETCH) ] = {
810 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
811 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
816 static __initconst const u64 snb_hw_cache_event_ids
817 [PERF_COUNT_HW_CACHE_MAX]
818 [PERF_COUNT_HW_CACHE_OP_MAX]
819 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
823 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
824 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
827 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
828 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
830 [ C(OP_PREFETCH) ] = {
831 [ C(RESULT_ACCESS) ] = 0x0,
832 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
837 [ C(RESULT_ACCESS) ] = 0x0,
838 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
841 [ C(RESULT_ACCESS) ] = -1,
842 [ C(RESULT_MISS) ] = -1,
844 [ C(OP_PREFETCH) ] = {
845 [ C(RESULT_ACCESS) ] = 0x0,
846 [ C(RESULT_MISS) ] = 0x0,
851 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
852 [ C(RESULT_ACCESS) ] = 0x01b7,
853 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
854 [ C(RESULT_MISS) ] = 0x01b7,
857 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
858 [ C(RESULT_ACCESS) ] = 0x01b7,
859 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
860 [ C(RESULT_MISS) ] = 0x01b7,
862 [ C(OP_PREFETCH) ] = {
863 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
864 [ C(RESULT_ACCESS) ] = 0x01b7,
865 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
866 [ C(RESULT_MISS) ] = 0x01b7,
871 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
872 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
875 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
876 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
878 [ C(OP_PREFETCH) ] = {
879 [ C(RESULT_ACCESS) ] = 0x0,
880 [ C(RESULT_MISS) ] = 0x0,
885 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
886 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
889 [ C(RESULT_ACCESS) ] = -1,
890 [ C(RESULT_MISS) ] = -1,
892 [ C(OP_PREFETCH) ] = {
893 [ C(RESULT_ACCESS) ] = -1,
894 [ C(RESULT_MISS) ] = -1,
899 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
900 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
903 [ C(RESULT_ACCESS) ] = -1,
904 [ C(RESULT_MISS) ] = -1,
906 [ C(OP_PREFETCH) ] = {
907 [ C(RESULT_ACCESS) ] = -1,
908 [ C(RESULT_MISS) ] = -1,
913 [ C(RESULT_ACCESS) ] = 0x01b7,
914 [ C(RESULT_MISS) ] = 0x01b7,
917 [ C(RESULT_ACCESS) ] = 0x01b7,
918 [ C(RESULT_MISS) ] = 0x01b7,
920 [ C(OP_PREFETCH) ] = {
921 [ C(RESULT_ACCESS) ] = 0x01b7,
922 [ C(RESULT_MISS) ] = 0x01b7,
928 /*
929 * Notes on the events:
930 * - data reads do not include code reads (comparable to earlier tables)
931 * - data counts include speculative execution (except L1 write, dtlb, bpu)
932 * - remote node access includes remote memory, remote cache, remote mmio.
933 * - prefetches are not included in the counts because they are not
934 *   reliably counted.
935 */
937 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
938 #define HSW_DEMAND_RFO BIT_ULL(1)
939 #define HSW_ANY_RESPONSE BIT_ULL(16)
940 #define HSW_SUPPLIER_NONE BIT_ULL(17)
941 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
942 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
943 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
944 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
945 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
946 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
947 HSW_L3_MISS_REMOTE_HOP2P)
948 #define HSW_SNOOP_NONE BIT_ULL(31)
949 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
950 #define HSW_SNOOP_MISS BIT_ULL(33)
951 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
952 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
953 #define HSW_SNOOP_HITM BIT_ULL(36)
954 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
955 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
956 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
957 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
958 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
959 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
960 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
961 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
962 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
963 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
964 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
966 #define BDW_L3_MISS_LOCAL BIT(26)
967 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
968 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
969 HSW_L3_MISS_REMOTE_HOP2P)
972 static __initconst const u64 hsw_hw_cache_event_ids
973 [PERF_COUNT_HW_CACHE_MAX]
974 [PERF_COUNT_HW_CACHE_OP_MAX]
975 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
979 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
980 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
983 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
984 [ C(RESULT_MISS) ] = 0x0,
986 [ C(OP_PREFETCH) ] = {
987 [ C(RESULT_ACCESS) ] = 0x0,
988 [ C(RESULT_MISS) ] = 0x0,
993 [ C(RESULT_ACCESS) ] = 0x0,
994 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
997 [ C(RESULT_ACCESS) ] = -1,
998 [ C(RESULT_MISS) ] = -1,
1000 [ C(OP_PREFETCH) ] = {
1001 [ C(RESULT_ACCESS) ] = 0x0,
1002 [ C(RESULT_MISS) ] = 0x0,
1007 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1008 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1011 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1012 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1014 [ C(OP_PREFETCH) ] = {
1015 [ C(RESULT_ACCESS) ] = 0x0,
1016 [ C(RESULT_MISS) ] = 0x0,
1021 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1022 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1025 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1026 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1028 [ C(OP_PREFETCH) ] = {
1029 [ C(RESULT_ACCESS) ] = 0x0,
1030 [ C(RESULT_MISS) ] = 0x0,
1035 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
1036 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1039 [ C(RESULT_ACCESS) ] = -1,
1040 [ C(RESULT_MISS) ] = -1,
1042 [ C(OP_PREFETCH) ] = {
1043 [ C(RESULT_ACCESS) ] = -1,
1044 [ C(RESULT_MISS) ] = -1,
1049 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
1050 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1053 [ C(RESULT_ACCESS) ] = -1,
1054 [ C(RESULT_MISS) ] = -1,
1056 [ C(OP_PREFETCH) ] = {
1057 [ C(RESULT_ACCESS) ] = -1,
1058 [ C(RESULT_MISS) ] = -1,
1063 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1064 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1067 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1068 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1070 [ C(OP_PREFETCH) ] = {
1071 [ C(RESULT_ACCESS) ] = 0x0,
1072 [ C(RESULT_MISS) ] = 0x0,
1077 static __initconst const u64 hsw_hw_cache_extra_regs
1078 [PERF_COUNT_HW_CACHE_MAX]
1079 [PERF_COUNT_HW_CACHE_OP_MAX]
1080 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1084 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1085 HSW_LLC_ACCESS,
1086 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1087 HSW_L3_MISS|HSW_ANY_SNOOP,
1090 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1091 HSW_LLC_ACCESS,
1092 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1093 HSW_L3_MISS|HSW_ANY_SNOOP,
1095 [ C(OP_PREFETCH) ] = {
1096 [ C(RESULT_ACCESS) ] = 0x0,
1097 [ C(RESULT_MISS) ] = 0x0,
1102 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1103 HSW_L3_MISS_LOCAL_DRAM|
1104 HSW_SNOOP_DRAM,
1105 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1106 HSW_L3_MISS_REMOTE|
1107 HSW_SNOOP_DRAM,
1110 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1111 HSW_L3_MISS_LOCAL_DRAM|
1112 HSW_SNOOP_DRAM,
1113 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1114 HSW_L3_MISS_REMOTE|
1115 HSW_SNOOP_DRAM,
1117 [ C(OP_PREFETCH) ] = {
1118 [ C(RESULT_ACCESS) ] = 0x0,
1119 [ C(RESULT_MISS) ] = 0x0,
1124 static __initconst const u64 westmere_hw_cache_event_ids
1125 [PERF_COUNT_HW_CACHE_MAX]
1126 [PERF_COUNT_HW_CACHE_OP_MAX]
1127 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1131 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1132 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1135 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1136 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1138 [ C(OP_PREFETCH) ] = {
1139 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1140 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1145 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1146 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1149 [ C(RESULT_ACCESS) ] = -1,
1150 [ C(RESULT_MISS) ] = -1,
1152 [ C(OP_PREFETCH) ] = {
1153 [ C(RESULT_ACCESS) ] = 0x0,
1154 [ C(RESULT_MISS) ] = 0x0,
1159 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1160 [ C(RESULT_ACCESS) ] = 0x01b7,
1161 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1162 [ C(RESULT_MISS) ] = 0x01b7,
1164 /*
1165 * Use RFO, not WRITEBACK, because a write miss would typically occur
1166 * on RFO.
1167 */
1169 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1170 [ C(RESULT_ACCESS) ] = 0x01b7,
1171 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1172 [ C(RESULT_MISS) ] = 0x01b7,
1174 [ C(OP_PREFETCH) ] = {
1175 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1176 [ C(RESULT_ACCESS) ] = 0x01b7,
1177 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1178 [ C(RESULT_MISS) ] = 0x01b7,
1183 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1184 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1187 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1188 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1190 [ C(OP_PREFETCH) ] = {
1191 [ C(RESULT_ACCESS) ] = 0x0,
1192 [ C(RESULT_MISS) ] = 0x0,
1197 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1198 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1201 [ C(RESULT_ACCESS) ] = -1,
1202 [ C(RESULT_MISS) ] = -1,
1204 [ C(OP_PREFETCH) ] = {
1205 [ C(RESULT_ACCESS) ] = -1,
1206 [ C(RESULT_MISS) ] = -1,
1211 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1212 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1215 [ C(RESULT_ACCESS) ] = -1,
1216 [ C(RESULT_MISS) ] = -1,
1218 [ C(OP_PREFETCH) ] = {
1219 [ C(RESULT_ACCESS) ] = -1,
1220 [ C(RESULT_MISS) ] = -1,
1225 [ C(RESULT_ACCESS) ] = 0x01b7,
1226 [ C(RESULT_MISS) ] = 0x01b7,
1229 [ C(RESULT_ACCESS) ] = 0x01b7,
1230 [ C(RESULT_MISS) ] = 0x01b7,
1232 [ C(OP_PREFETCH) ] = {
1233 [ C(RESULT_ACCESS) ] = 0x01b7,
1234 [ C(RESULT_MISS) ] = 0x01b7,
1240 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1241 * See IA32 SDM Vol 3B 30.6.1.3
1244 #define NHM_DMND_DATA_RD (1 << 0)
1245 #define NHM_DMND_RFO (1 << 1)
1246 #define NHM_DMND_IFETCH (1 << 2)
1247 #define NHM_DMND_WB (1 << 3)
1248 #define NHM_PF_DATA_RD (1 << 4)
1249 #define NHM_PF_DATA_RFO (1 << 5)
1250 #define NHM_PF_IFETCH (1 << 6)
1251 #define NHM_OFFCORE_OTHER (1 << 7)
1252 #define NHM_UNCORE_HIT (1 << 8)
1253 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1254 #define NHM_OTHER_CORE_HITM (1 << 10)
1256 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1257 #define NHM_REMOTE_DRAM (1 << 13)
1258 #define NHM_LOCAL_DRAM (1 << 14)
1259 #define NHM_NON_DRAM (1 << 15)
1261 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1262 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1264 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1265 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1266 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1268 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1269 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1270 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
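/*
 * The Nehalem/Westmere OFFCORE_RESPONSE MSR has no single "any response"
 * bit, so an L3 access is approximated here as the union of the hit and
 * miss response types defined above.
 */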
1272 static __initconst const u64 nehalem_hw_cache_extra_regs
1273 [PERF_COUNT_HW_CACHE_MAX]
1274 [PERF_COUNT_HW_CACHE_OP_MAX]
1275 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1279 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1280 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1283 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1284 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1286 [ C(OP_PREFETCH) ] = {
1287 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1288 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1293 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1294 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1297 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1298 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1300 [ C(OP_PREFETCH) ] = {
1301 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1302 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1307 static __initconst const u64 nehalem_hw_cache_event_ids
1308 [PERF_COUNT_HW_CACHE_MAX]
1309 [PERF_COUNT_HW_CACHE_OP_MAX]
1310 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1314 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1315 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1318 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1319 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1321 [ C(OP_PREFETCH) ] = {
1322 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1323 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1328 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1329 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1332 [ C(RESULT_ACCESS) ] = -1,
1333 [ C(RESULT_MISS) ] = -1,
1335 [ C(OP_PREFETCH) ] = {
1336 [ C(RESULT_ACCESS) ] = 0x0,
1337 [ C(RESULT_MISS) ] = 0x0,
1342 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1343 [ C(RESULT_ACCESS) ] = 0x01b7,
1344 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1345 [ C(RESULT_MISS) ] = 0x01b7,
1347 /*
1348 * Use RFO, not WRITEBACK, because a write miss would typically occur
1349 * on RFO.
1350 */
1352 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1353 [ C(RESULT_ACCESS) ] = 0x01b7,
1354 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1355 [ C(RESULT_MISS) ] = 0x01b7,
1357 [ C(OP_PREFETCH) ] = {
1358 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1359 [ C(RESULT_ACCESS) ] = 0x01b7,
1360 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1361 [ C(RESULT_MISS) ] = 0x01b7,
1366 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1367 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1370 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1371 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1373 [ C(OP_PREFETCH) ] = {
1374 [ C(RESULT_ACCESS) ] = 0x0,
1375 [ C(RESULT_MISS) ] = 0x0,
1380 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1381 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1384 [ C(RESULT_ACCESS) ] = -1,
1385 [ C(RESULT_MISS) ] = -1,
1387 [ C(OP_PREFETCH) ] = {
1388 [ C(RESULT_ACCESS) ] = -1,
1389 [ C(RESULT_MISS) ] = -1,
1394 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1395 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1398 [ C(RESULT_ACCESS) ] = -1,
1399 [ C(RESULT_MISS) ] = -1,
1401 [ C(OP_PREFETCH) ] = {
1402 [ C(RESULT_ACCESS) ] = -1,
1403 [ C(RESULT_MISS) ] = -1,
1408 [ C(RESULT_ACCESS) ] = 0x01b7,
1409 [ C(RESULT_MISS) ] = 0x01b7,
1412 [ C(RESULT_ACCESS) ] = 0x01b7,
1413 [ C(RESULT_MISS) ] = 0x01b7,
1415 [ C(OP_PREFETCH) ] = {
1416 [ C(RESULT_ACCESS) ] = 0x01b7,
1417 [ C(RESULT_MISS) ] = 0x01b7,
1422 static __initconst const u64 core2_hw_cache_event_ids
1423 [PERF_COUNT_HW_CACHE_MAX]
1424 [PERF_COUNT_HW_CACHE_OP_MAX]
1425 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1429 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1430 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1433 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1434 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1436 [ C(OP_PREFETCH) ] = {
1437 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1438 [ C(RESULT_MISS) ] = 0,
1443 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1444 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1447 [ C(RESULT_ACCESS) ] = -1,
1448 [ C(RESULT_MISS) ] = -1,
1450 [ C(OP_PREFETCH) ] = {
1451 [ C(RESULT_ACCESS) ] = 0,
1452 [ C(RESULT_MISS) ] = 0,
1457 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1458 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1461 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1462 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1464 [ C(OP_PREFETCH) ] = {
1465 [ C(RESULT_ACCESS) ] = 0,
1466 [ C(RESULT_MISS) ] = 0,
1471 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1472 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1475 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1476 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1478 [ C(OP_PREFETCH) ] = {
1479 [ C(RESULT_ACCESS) ] = 0,
1480 [ C(RESULT_MISS) ] = 0,
1485 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1486 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1489 [ C(RESULT_ACCESS) ] = -1,
1490 [ C(RESULT_MISS) ] = -1,
1492 [ C(OP_PREFETCH) ] = {
1493 [ C(RESULT_ACCESS) ] = -1,
1494 [ C(RESULT_MISS) ] = -1,
1499 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1500 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1503 [ C(RESULT_ACCESS) ] = -1,
1504 [ C(RESULT_MISS) ] = -1,
1506 [ C(OP_PREFETCH) ] = {
1507 [ C(RESULT_ACCESS) ] = -1,
1508 [ C(RESULT_MISS) ] = -1,
1513 static __initconst const u64 atom_hw_cache_event_ids
1514 [PERF_COUNT_HW_CACHE_MAX]
1515 [PERF_COUNT_HW_CACHE_OP_MAX]
1516 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1520 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1521 [ C(RESULT_MISS) ] = 0,
1524 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1525 [ C(RESULT_MISS) ] = 0,
1527 [ C(OP_PREFETCH) ] = {
1528 [ C(RESULT_ACCESS) ] = 0x0,
1529 [ C(RESULT_MISS) ] = 0,
1534 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1535 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1538 [ C(RESULT_ACCESS) ] = -1,
1539 [ C(RESULT_MISS) ] = -1,
1541 [ C(OP_PREFETCH) ] = {
1542 [ C(RESULT_ACCESS) ] = 0,
1543 [ C(RESULT_MISS) ] = 0,
1548 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1549 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1552 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1553 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1555 [ C(OP_PREFETCH) ] = {
1556 [ C(RESULT_ACCESS) ] = 0,
1557 [ C(RESULT_MISS) ] = 0,
1562 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1563 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1566 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1567 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1569 [ C(OP_PREFETCH) ] = {
1570 [ C(RESULT_ACCESS) ] = 0,
1571 [ C(RESULT_MISS) ] = 0,
1576 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1577 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1580 [ C(RESULT_ACCESS) ] = -1,
1581 [ C(RESULT_MISS) ] = -1,
1583 [ C(OP_PREFETCH) ] = {
1584 [ C(RESULT_ACCESS) ] = -1,
1585 [ C(RESULT_MISS) ] = -1,
1590 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1591 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1594 [ C(RESULT_ACCESS) ] = -1,
1595 [ C(RESULT_MISS) ] = -1,
1597 [ C(OP_PREFETCH) ] = {
1598 [ C(RESULT_ACCESS) ] = -1,
1599 [ C(RESULT_MISS) ] = -1,
1604 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1605 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1606 /* no_alloc_cycles.not_delivered */
1607 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1608 "event=0xca,umask=0x50");
1609 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1610 /* uops_retired.all */
1611 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1612 "event=0xc2,umask=0x10");
1613 /* uops_retired.all */
1614 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1615 "event=0xc2,umask=0x10");
1617 static struct attribute *slm_events_attrs[] = {
1618 EVENT_PTR(td_total_slots_slm),
1619 EVENT_PTR(td_total_slots_scale_slm),
1620 EVENT_PTR(td_fetch_bubbles_slm),
1621 EVENT_PTR(td_fetch_bubbles_scale_slm),
1622 EVENT_PTR(td_slots_issued_slm),
1623 EVENT_PTR(td_slots_retired_slm),
1627 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1629 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1630 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1631 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1635 #define SLM_DMND_READ SNB_DMND_DATA_RD
1636 #define SLM_DMND_WRITE SNB_DMND_RFO
1637 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1639 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1640 #define SLM_LLC_ACCESS SNB_RESP_ANY
1641 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1643 static __initconst const u64 slm_hw_cache_extra_regs
1644 [PERF_COUNT_HW_CACHE_MAX]
1645 [PERF_COUNT_HW_CACHE_OP_MAX]
1646 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1650 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1651 [ C(RESULT_MISS) ] = 0,
1654 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1655 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1657 [ C(OP_PREFETCH) ] = {
1658 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1659 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1664 static __initconst const u64 slm_hw_cache_event_ids
1665 [PERF_COUNT_HW_CACHE_MAX]
1666 [PERF_COUNT_HW_CACHE_OP_MAX]
1667 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1671 [ C(RESULT_ACCESS) ] = 0,
1672 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1675 [ C(RESULT_ACCESS) ] = 0,
1676 [ C(RESULT_MISS) ] = 0,
1678 [ C(OP_PREFETCH) ] = {
1679 [ C(RESULT_ACCESS) ] = 0,
1680 [ C(RESULT_MISS) ] = 0,
1685 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1686 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1689 [ C(RESULT_ACCESS) ] = -1,
1690 [ C(RESULT_MISS) ] = -1,
1692 [ C(OP_PREFETCH) ] = {
1693 [ C(RESULT_ACCESS) ] = 0,
1694 [ C(RESULT_MISS) ] = 0,
1699 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1700 [ C(RESULT_ACCESS) ] = 0x01b7,
1701 [ C(RESULT_MISS) ] = 0,
1704 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1705 [ C(RESULT_ACCESS) ] = 0x01b7,
1706 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1707 [ C(RESULT_MISS) ] = 0x01b7,
1709 [ C(OP_PREFETCH) ] = {
1710 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1711 [ C(RESULT_ACCESS) ] = 0x01b7,
1712 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1713 [ C(RESULT_MISS) ] = 0x01b7,
1718 [ C(RESULT_ACCESS) ] = 0,
1719 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1722 [ C(RESULT_ACCESS) ] = 0,
1723 [ C(RESULT_MISS) ] = 0,
1725 [ C(OP_PREFETCH) ] = {
1726 [ C(RESULT_ACCESS) ] = 0,
1727 [ C(RESULT_MISS) ] = 0,
1732 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1733 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1736 [ C(RESULT_ACCESS) ] = -1,
1737 [ C(RESULT_MISS) ] = -1,
1739 [ C(OP_PREFETCH) ] = {
1740 [ C(RESULT_ACCESS) ] = -1,
1741 [ C(RESULT_MISS) ] = -1,
1746 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1747 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1750 [ C(RESULT_ACCESS) ] = -1,
1751 [ C(RESULT_MISS) ] = -1,
1753 [ C(OP_PREFETCH) ] = {
1754 [ C(RESULT_ACCESS) ] = -1,
1755 [ C(RESULT_MISS) ] = -1,
1760 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1761 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1762 /* UOPS_NOT_DELIVERED.ANY */
1763 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1764 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1765 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1766 /* UOPS_RETIRED.ANY */
1767 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1768 /* UOPS_ISSUED.ANY */
1769 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1771 static struct attribute *glm_events_attrs[] = {
1772 EVENT_PTR(td_total_slots_glm),
1773 EVENT_PTR(td_total_slots_scale_glm),
1774 EVENT_PTR(td_fetch_bubbles_glm),
1775 EVENT_PTR(td_recovery_bubbles_glm),
1776 EVENT_PTR(td_slots_issued_glm),
1777 EVENT_PTR(td_slots_retired_glm),
1781 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1782 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1783 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1784 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1788 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1789 #define GLM_DEMAND_RFO BIT_ULL(1)
1790 #define GLM_ANY_RESPONSE BIT_ULL(16)
1791 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1792 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1793 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1794 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1795 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1796 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1797 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1799 static __initconst const u64 glm_hw_cache_event_ids
1800 [PERF_COUNT_HW_CACHE_MAX]
1801 [PERF_COUNT_HW_CACHE_OP_MAX]
1802 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1805 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1806 [C(RESULT_MISS)] = 0x0,
1809 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1810 [C(RESULT_MISS)] = 0x0,
1812 [C(OP_PREFETCH)] = {
1813 [C(RESULT_ACCESS)] = 0x0,
1814 [C(RESULT_MISS)] = 0x0,
1819 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1820 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1823 [C(RESULT_ACCESS)] = -1,
1824 [C(RESULT_MISS)] = -1,
1826 [C(OP_PREFETCH)] = {
1827 [C(RESULT_ACCESS)] = 0x0,
1828 [C(RESULT_MISS)] = 0x0,
1833 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1834 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1837 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1838 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1840 [C(OP_PREFETCH)] = {
1841 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1842 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1847 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1848 [C(RESULT_MISS)] = 0x0,
1851 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1852 [C(RESULT_MISS)] = 0x0,
1854 [C(OP_PREFETCH)] = {
1855 [C(RESULT_ACCESS)] = 0x0,
1856 [C(RESULT_MISS)] = 0x0,
1861 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1862 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1865 [C(RESULT_ACCESS)] = -1,
1866 [C(RESULT_MISS)] = -1,
1868 [C(OP_PREFETCH)] = {
1869 [C(RESULT_ACCESS)] = -1,
1870 [C(RESULT_MISS)] = -1,
1875 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1876 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1879 [C(RESULT_ACCESS)] = -1,
1880 [C(RESULT_MISS)] = -1,
1882 [C(OP_PREFETCH)] = {
1883 [C(RESULT_ACCESS)] = -1,
1884 [C(RESULT_MISS)] = -1,
1889 static __initconst const u64 glm_hw_cache_extra_regs
1890 [PERF_COUNT_HW_CACHE_MAX]
1891 [PERF_COUNT_HW_CACHE_OP_MAX]
1892 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1895 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1896 GLM_LLC_ACCESS,
1897 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1898 GLM_LLC_MISS,
1901 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1902 GLM_LLC_ACCESS,
1903 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1904 GLM_LLC_MISS,
1906 [C(OP_PREFETCH)] = {
1907 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1908 GLM_LLC_ACCESS,
1909 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1910 GLM_LLC_MISS,
1915 static __initconst const u64 glp_hw_cache_event_ids
1916 [PERF_COUNT_HW_CACHE_MAX]
1917 [PERF_COUNT_HW_CACHE_OP_MAX]
1918 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1921 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1922 [C(RESULT_MISS)] = 0x0,
1925 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1926 [C(RESULT_MISS)] = 0x0,
1928 [C(OP_PREFETCH)] = {
1929 [C(RESULT_ACCESS)] = 0x0,
1930 [C(RESULT_MISS)] = 0x0,
1935 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1936 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1939 [C(RESULT_ACCESS)] = -1,
1940 [C(RESULT_MISS)] = -1,
1942 [C(OP_PREFETCH)] = {
1943 [C(RESULT_ACCESS)] = 0x0,
1944 [C(RESULT_MISS)] = 0x0,
1949 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1950 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1953 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1954 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1956 [C(OP_PREFETCH)] = {
1957 [C(RESULT_ACCESS)] = 0x0,
1958 [C(RESULT_MISS)] = 0x0,
1963 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1964 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1967 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1968 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1970 [C(OP_PREFETCH)] = {
1971 [C(RESULT_ACCESS)] = 0x0,
1972 [C(RESULT_MISS)] = 0x0,
1977 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1978 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1981 [C(RESULT_ACCESS)] = -1,
1982 [C(RESULT_MISS)] = -1,
1984 [C(OP_PREFETCH)] = {
1985 [C(RESULT_ACCESS)] = -1,
1986 [C(RESULT_MISS)] = -1,
1991 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1992 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1995 [C(RESULT_ACCESS)] = -1,
1996 [C(RESULT_MISS)] = -1,
1998 [C(OP_PREFETCH)] = {
1999 [C(RESULT_ACCESS)] = -1,
2000 [C(RESULT_MISS)] = -1,
2005 static __initconst const u64 glp_hw_cache_extra_regs
2006 [PERF_COUNT_HW_CACHE_MAX]
2007 [PERF_COUNT_HW_CACHE_OP_MAX]
2008 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2011 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2012 GLM_LLC_ACCESS,
2013 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2014 GLM_LLC_MISS,
2017 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2018 GLM_LLC_ACCESS,
2019 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2020 GLM_LLC_MISS,
2022 [C(OP_PREFETCH)] = {
2023 [C(RESULT_ACCESS)] = 0x0,
2024 [C(RESULT_MISS)] = 0x0,
2029 #define TNT_LOCAL_DRAM BIT_ULL(26)
2030 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2031 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2032 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2033 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2034 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2035 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2037 static __initconst const u64 tnt_hw_cache_extra_regs
2038 [PERF_COUNT_HW_CACHE_MAX]
2039 [PERF_COUNT_HW_CACHE_OP_MAX]
2040 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2043 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2044 TNT_LLC_ACCESS,
2045 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2046 TNT_LLC_MISS,
2049 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2050 TNT_LLC_ACCESS,
2051 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2052 TNT_LLC_MISS,
2054 [C(OP_PREFETCH)] = {
2055 [C(RESULT_ACCESS)] = 0x0,
2056 [C(RESULT_MISS)] = 0x0,
2061 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2062 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2063 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2064 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2066 static struct attribute *tnt_events_attrs[] = {
2067 EVENT_PTR(td_fe_bound_tnt),
2068 EVENT_PTR(td_retiring_tnt),
2069 EVENT_PTR(td_bad_spec_tnt),
2070 EVENT_PTR(td_be_bound_tnt),
2074 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2075 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2076 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2077 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2081 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2082 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2083 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2084 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2085 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2089 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2090 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2091 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
2092 #define KNL_MCDRAM_FAR BIT_ULL(22)
2093 #define KNL_DDR_LOCAL BIT_ULL(23)
2094 #define KNL_DDR_FAR BIT_ULL(24)
2095 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2096 KNL_DDR_LOCAL | KNL_DDR_FAR)
2097 #define KNL_L2_READ SLM_DMND_READ
2098 #define KNL_L2_WRITE SLM_DMND_WRITE
2099 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2100 #define KNL_L2_ACCESS SLM_LLC_ACCESS
2101 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2102 KNL_DRAM_ANY | SNB_SNP_ANY | \
2105 static __initconst const u64 knl_hw_cache_extra_regs
2106 [PERF_COUNT_HW_CACHE_MAX]
2107 [PERF_COUNT_HW_CACHE_OP_MAX]
2108 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2111 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2112 [C(RESULT_MISS)] = 0,
2115 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2116 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2118 [C(OP_PREFETCH)] = {
2119 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2120 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2126 * Used from PMIs where the LBRs are already disabled.
2128 * This function may be called consecutively. The PMU is required to remain
2129 * in the disabled state across such consecutive calls.
2131 * During consecutive calls, the same disable value will be written to related
2132 * registers, so the PMU state remains unchanged.
2134 * intel_bts events don't coexist with intel PMU's BTS events because of
2135 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2136 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2138 * Avoid PEBS_ENABLE MSR access in PMIs.
2139 * GLOBAL_CTRL has already been disabled, so none of the counters count anymore;
2140 * it doesn't matter whether PEBS is enabled or not.
2141 * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2142 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2143 * However, some cases may change the PEBS status, e.g. PMI throttling.
2144 * PEBS_ENABLE should be updated wherever the status changes.
2146 static __always_inline void __intel_pmu_disable_all(bool bts)
2148 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2150 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2152 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2153 intel_pmu_disable_bts();
2156 static __always_inline void intel_pmu_disable_all(void)
2158 __intel_pmu_disable_all(true);
2159 intel_pmu_pebs_disable_all();
2160 intel_pmu_lbr_disable_all();
2163 static void __intel_pmu_enable_all(int added, bool pmi)
2165 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2166 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2168 intel_pmu_lbr_enable_all(pmi);
2169 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2170 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2172 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2173 struct perf_event *event =
2174 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2176 if (WARN_ON_ONCE(!event))
2179 intel_pmu_enable_bts(event->hw.config);
2183 static void intel_pmu_enable_all(int added)
2185 intel_pmu_pebs_enable_all();
2186 __intel_pmu_enable_all(added, false);
2190 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2191 unsigned int cnt, unsigned long flags)
2193 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2195 intel_pmu_lbr_read();
2196 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2198 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2199 intel_pmu_enable_all(0);
2200 local_irq_restore(flags);
2205 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2207 unsigned long flags;
2209 /* must not have branches... */
2210 local_irq_save(flags);
2211 __intel_pmu_disable_all(false); /* we don't care about BTS */
2212 __intel_pmu_pebs_disable_all();
2213 __intel_pmu_lbr_disable();
2214 /* ... until here */
2215 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2219 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2221 unsigned long flags;
2223 /* must not have branches... */
2224 local_irq_save(flags);
2225 __intel_pmu_disable_all(false); /* we don't care about BTS */
2226 __intel_pmu_pebs_disable_all();
2227 __intel_pmu_arch_lbr_disable();
2228 /* ... until here */
2229 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2234 * Intel Errata AAK100 (model 26)
2235 * Intel Errata AAP53 (model 30)
2236 * Intel Errata BD53 (model 44)
2238 * The official story:
2239 * These chips need to be 'reset' when adding counters by programming the
2240 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2241 * in sequence on the same PMC or on different PMCs.
2243 * In practice it appears some of these events do in fact count, and
2244 * we need to program all 4 events.
2246 static void intel_pmu_nhm_workaround(void)
2248 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2249 static const unsigned long nhm_magic[4] = {
2255 struct perf_event *event;
2259 * The erratum requires the following steps:
2260 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2261 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2262 *    the corresponding PMCx;
2263 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2264 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2265 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
2269 * The actual steps we take differ slightly from the above:
2270 * A) To reduce MSR operations, we skip step 1) since both MSRs
2271 *    are already cleared before this function is called;
2272 * B) Call x86_perf_event_update() to save PMCx before configuring
2273 *    PERFEVTSELx with the magic events;
2274 * C) In step 5), clear a PERFEVTSELx only when it is not
2275 *    currently in use;
2276 * D) Call x86_perf_event_set_period() to restore PMCx.
2279 /* We always operate on 4 pairs of PERFEVTSELx/PMCx registers */
2280 for (i = 0; i < 4; i++) {
2281 event = cpuc->events[i];
2283 x86_perf_event_update(event);
2286 for (i = 0; i < 4; i++) {
2287 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2288 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
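/* Steps 3) and 4): briefly enable counters 0-3 so the magic events take effect, then disable them again */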
2291 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2292 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2294 for (i = 0; i < 4; i++) {
2295 event = cpuc->events[i];
2298 x86_perf_event_set_period(event);
2299 __x86_pmu_enable_event(&event->hw,
2300 ARCH_PERFMON_EVENTSEL_ENABLE);
2302 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2306 static void intel_pmu_nhm_enable_all(int added)
2309 intel_pmu_nhm_workaround();
2310 intel_pmu_enable_all(added);
2313 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2315 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2317 if (cpuc->tfa_shadow != val) {
2318 cpuc->tfa_shadow = val;
2319 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2323 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2326 * We're going to use PMC3, make sure TFA is set before we touch it.
2329 intel_set_tfa(cpuc, true);
2332 static void intel_tfa_pmu_enable_all(int added)
2334 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2337 * If we find PMC3 is no longer used when we enable the PMU, we can
2340 if (!test_bit(3, cpuc->active_mask))
2341 intel_set_tfa(cpuc, false);
2343 intel_pmu_enable_all(added);
2346 static inline u64 intel_pmu_get_status(void)
2350 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2355 static inline void intel_pmu_ack_status(u64 ack)
2357 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2360 static inline bool event_is_checkpointed(struct perf_event *event)
2362 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2365 static inline void intel_set_masks(struct perf_event *event, int idx)
2367 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2369 if (event->attr.exclude_host)
2370 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2371 if (event->attr.exclude_guest)
2372 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2373 if (event_is_checkpointed(event))
2374 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2377 static inline void intel_clear_masks(struct perf_event *event, int idx)
2379 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2381 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2382 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2383 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2386 static void intel_pmu_disable_fixed(struct perf_event *event)
2388 struct hw_perf_event *hwc = &event->hw;
2392 if (is_topdown_idx(idx)) {
2393 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2396 * When there are other active TopDown events,
2397 * don't disable the fixed counter 3.
2399 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2401 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2404 intel_clear_masks(event, idx);
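/* Each fixed counter owns a 4-bit control field in the fixed-counter control MSR; clear ours */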
2406 mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
2407 rdmsrl(hwc->config_base, ctrl_val);
2409 wrmsrl(hwc->config_base, ctrl_val);
2412 static void intel_pmu_disable_event(struct perf_event *event)
2414 struct hw_perf_event *hwc = &event->hw;
2418 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2419 intel_clear_masks(event, idx);
2420 x86_pmu_disable_event(event);
2422 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2423 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2424 intel_pmu_disable_fixed(event);
2426 case INTEL_PMC_IDX_FIXED_BTS:
2427 intel_pmu_disable_bts();
2428 intel_pmu_drain_bts_buffer();
2430 case INTEL_PMC_IDX_FIXED_VLBR:
2431 intel_clear_masks(event, idx);
2434 intel_clear_masks(event, idx);
2435 pr_warn("Failed to disable the event with invalid index %d\n",
2441 * Needs to be called after x86_pmu_disable_event,
2442 * so we don't trigger the event without PEBS bit set.
2444 if (unlikely(event->attr.precise_ip))
2445 intel_pmu_pebs_disable(event);
2448 static void intel_pmu_del_event(struct perf_event *event)
2450 if (needs_branch_stack(event))
2451 intel_pmu_lbr_del(event);
2452 if (event->attr.precise_ip)
2453 intel_pmu_pebs_del(event);
2456 static int icl_set_topdown_event_period(struct perf_event *event)
2458 struct hw_perf_event *hwc = &event->hw;
2459 s64 left = local64_read(&hwc->period_left);
2462 * The values in the PERF_METRICS MSR are derived from fixed counter 3.
2463 * Software should start both registers, PERF_METRICS and fixed
2464 * counter 3, from zero.
2465 * Clear PERF_METRICS and fixed counter 3 at initialization.
2466 * After that, both MSRs are cleared on each read, so there is
2467 * no need to clear them again.
2469 if (left == x86_pmu.max_period) {
2470 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2471 wrmsrl(MSR_PERF_METRICS, 0);
2472 hwc->saved_slots = 0;
2473 hwc->saved_metric = 0;
2476 if ((hwc->saved_slots) && is_slots_event(event)) {
2477 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2478 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2481 perf_event_update_userpage(event);
2486 static int adl_set_topdown_event_period(struct perf_event *event)
2488 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2490 if (pmu->cpu_type != hybrid_big)
2493 return icl_set_topdown_event_period(event);
2496 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2501 * The metric is reported as an 8-bit integer fraction
2502 * summing up to 0xff.
2503 * slots-in-metric = (Metric / 0xff) * slots
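 * For example, with slots = 1000 and a metric byte of 0x80, this gives
 * roughly (1000 * 128) / 255, i.e. about 501 slots attributed to the metric.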
2505 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2506 return mul_u64_u32_div(slots, val, 0xff);
2509 static u64 icl_get_topdown_value(struct perf_event *event,
2510 u64 slots, u64 metrics)
2512 int idx = event->hw.idx;
2515 if (is_metric_idx(idx))
2516 delta = icl_get_metrics_event_value(metrics, slots, idx);
2523 static void __icl_update_topdown_event(struct perf_event *event,
2524 u64 slots, u64 metrics,
2525 u64 last_slots, u64 last_metrics)
2527 u64 delta, last = 0;
2529 delta = icl_get_topdown_value(event, slots, metrics);
2531 last = icl_get_topdown_value(event, last_slots, last_metrics);
2534 * The 8-bit integer fraction of the metric may not be accurate,
2535 * especially when the change is very small.
2536 * For example, if only a few bad_spec events happen, the fraction
2537 * may drop from 1 to 0. If so, the bad_spec event value
2538 * will be 0, which is definitely less than the last value.
2539 * Avoid updating event->count in this case.
2543 local64_add(delta, &event->count);
2547 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2548 u64 metrics, int metric_end)
2550 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2551 struct perf_event *other;
2554 event->hw.saved_slots = slots;
2555 event->hw.saved_metric = metrics;
2557 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2558 if (!is_topdown_idx(idx))
2560 other = cpuc->events[idx];
2561 other->hw.saved_slots = slots;
2562 other->hw.saved_metric = metrics;
2567 * Update all active Topdown events.
2569 * PERF_METRICS and fixed counter 3 are read separately. The values may be
2570 * modified by an NMI. The PMU has to be disabled before calling this function.
2573 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2575 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2576 struct perf_event *other;
2581 /* read Fixed counter 3 */
2582 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2586 /* read PERF_METRICS */
2587 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2589 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2590 if (!is_topdown_idx(idx))
2592 other = cpuc->events[idx];
2593 __icl_update_topdown_event(other, slots, metrics,
2594 event ? event->hw.saved_slots : 0,
2595 event ? event->hw.saved_metric : 0);
2599 * Check and update this event, which may have been cleared
2600 * in active_mask, e.g. by x86_pmu_stop().
2602 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2603 __icl_update_topdown_event(event, slots, metrics,
2604 event->hw.saved_slots,
2605 event->hw.saved_metric);
2608 * In x86_pmu_stop(), the event is cleared in active_mask first,
2609 * then the delta is drained, which indicates a context switch for
2611 * Save the metric and slots values for the context switch.
2612 * There is no need to reset PERF_METRICS and fixed counter 3,
2613 * because the values will be restored on the next schedule in.
2615 update_saved_topdown_regs(event, slots, metrics, metric_end);
2620 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2621 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2622 wrmsrl(MSR_PERF_METRICS, 0);
2624 update_saved_topdown_regs(event, 0, 0, metric_end);
2630 static u64 icl_update_topdown_event(struct perf_event *event)
2632 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2633 x86_pmu.num_topdown_events - 1);
2636 static u64 adl_update_topdown_event(struct perf_event *event)
2638 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2640 if (pmu->cpu_type != hybrid_big)
2643 return icl_update_topdown_event(event);
2647 static void intel_pmu_read_topdown_event(struct perf_event *event)
2649 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2651 /* Only need to call update_topdown_event() once for group read. */
2652 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2653 !is_slots_event(event))
2656 perf_pmu_disable(event->pmu);
2657 x86_pmu.update_topdown_event(event);
2658 perf_pmu_enable(event->pmu);
2661 static void intel_pmu_read_event(struct perf_event *event)
2663 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2664 intel_pmu_auto_reload_read(event);
2665 else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
2666 intel_pmu_read_topdown_event(event);
2668 x86_perf_event_update(event);
2671 static void intel_pmu_enable_fixed(struct perf_event *event)
2673 struct hw_perf_event *hwc = &event->hw;
2674 u64 ctrl_val, mask, bits = 0;
2677 if (is_topdown_idx(idx)) {
2678 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2680 * When there are other active TopDown events,
2681 * don't enable the fixed counter 3 again.
2683 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2686 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2689 intel_set_masks(event, idx);
2692 * Enable IRQ generation (0x8), if not PEBS,
2693 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2696 if (!event->attr.precise_ip)
2698 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2700 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2704 * ANY bit is supported in v3 and up
2706 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2709 idx -= INTEL_PMC_IDX_FIXED;
2711 mask = 0xfULL << (idx * 4);
2713 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2714 bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2715 mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2718 rdmsrl(hwc->config_base, ctrl_val);
2721 wrmsrl(hwc->config_base, ctrl_val);
2724 static void intel_pmu_enable_event(struct perf_event *event)
2726 struct hw_perf_event *hwc = &event->hw;
2729 if (unlikely(event->attr.precise_ip))
2730 intel_pmu_pebs_enable(event);
2733 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2734 intel_set_masks(event, idx);
2735 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2737 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2738 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2739 intel_pmu_enable_fixed(event);
2741 case INTEL_PMC_IDX_FIXED_BTS:
2742 if (!__this_cpu_read(cpu_hw_events.enabled))
2744 intel_pmu_enable_bts(hwc->config);
2746 case INTEL_PMC_IDX_FIXED_VLBR:
2747 intel_set_masks(event, idx);
2750 pr_warn("Failed to enable the event with invalid index %d\n",
2755 static void intel_pmu_add_event(struct perf_event *event)
2757 if (event->attr.precise_ip)
2758 intel_pmu_pebs_add(event);
2759 if (needs_branch_stack(event))
2760 intel_pmu_lbr_add(event);
2764 * Save and restart an expired event. Called from NMI context,
2765 * so it has to be careful about preempting normal event ops:
2767 int intel_pmu_save_and_restart(struct perf_event *event)
2769 x86_perf_event_update(event);
2771 * For a checkpointed counter always reset back to 0. This
2772 * avoids a situation where the counter overflows, aborts the
2773 * transaction and is then set back to shortly before the
2774 * overflow, and overflows and aborts again.
2776 if (unlikely(event_is_checkpointed(event))) {
2777 /* No race with NMIs because the counter should not be armed */
2778 wrmsrl(event->hw.event_base, 0);
2779 local64_set(&event->hw.prev_count, 0);
2781 return x86_perf_event_set_period(event);
2784 static void intel_pmu_reset(void)
2786 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2787 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2788 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2789 int num_counters = hybrid(cpuc->pmu, num_counters);
2790 unsigned long flags;
2796 local_irq_save(flags);
2798 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2800 for (idx = 0; idx < num_counters; idx++) {
2801 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2802 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2804 for (idx = 0; idx < num_counters_fixed; idx++) {
2805 if (fixed_counter_disabled(idx, cpuc->pmu))
2807 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2811 ds->bts_index = ds->bts_buffer_base;
2813 /* Ack all overflows and disable fixed counters */
2814 if (x86_pmu.version >= 2) {
2815 intel_pmu_ack_status(intel_pmu_get_status());
2816 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2819 /* Reset LBRs and LBR freezing */
2820 if (x86_pmu.lbr_nr) {
2821 update_debugctlmsr(get_debugctlmsr() &
2822 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2825 local_irq_restore(flags);
2828 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2830 struct perf_sample_data data;
2831 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2834 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2836 inc_irq_stat(apic_perf_irqs);
2839 * Ignore a range of extra bits in status that do not indicate
2840 * overflow by themselves.
2842 status &= ~(GLOBAL_STATUS_COND_CHG |
2843 GLOBAL_STATUS_ASIF |
2844 GLOBAL_STATUS_LBRS_FROZEN);
2848 * In case multiple PEBS events are sampled at the same time,
2849 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2850 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2851 * having their bits set in the status register. This is a sign
2852 * that there was at least one PEBS record pending at the time
2853 * of the PMU interrupt. PEBS counters must only be processed
2854 * via the drain_pebs() calls and not via the regular sample
2855 * processing loop coming after that the function, otherwise
2856 * processing loop that follows; otherwise
2857 * not marked with the EXACT tag. Another possibility is to have
2858 * one PEBS event and at least one non-PEBS event which overflows
2859 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2860 * not be set, yet the overflow status bit for the PEBS counter will
2863 * To avoid this problem, we systematically ignore the PEBS-enabled
2864 * counters from the GLOBAL_STATUS mask and we always process PEBS
2865 * events via drain_pebs().
2867 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2868 status &= ~cpuc->pebs_enabled;
2870 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2873 * PEBS overflow sets bit 62 in the global status register
2875 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
2876 u64 pebs_enabled = cpuc->pebs_enabled;
2879 x86_pmu.drain_pebs(regs, &data);
2880 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2883 * A PMI throttle may be triggered, which stops the PEBS event.
2884 * Although cpuc->pebs_enabled is updated accordingly, the
2885 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
2886 * forced to 0 in the PMI.
2887 * Update the MSR if pebs_enabled has changed.
2889 if (pebs_enabled != cpuc->pebs_enabled)
2890 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
2896 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
2898 if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2899 perf_guest_cbs->handle_intel_pt_intr))
2900 perf_guest_cbs->handle_intel_pt_intr();
2902 intel_pt_interrupt();
2906 * Intel Perf metrics
2908 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
2910 if (x86_pmu.update_topdown_event)
2911 x86_pmu.update_topdown_event(NULL);
2915 * Checkpointed counters can lead to 'spurious' PMIs because the
2916 * rollback caused by the PMI will have cleared the overflow status
2917 * bit. Therefore always force probe these counters.
2919 status |= cpuc->intel_cp_status;
2921 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2922 struct perf_event *event = cpuc->events[bit];
2926 if (!test_bit(bit, cpuc->active_mask))
2929 if (!intel_pmu_save_and_restart(event))
2932 perf_sample_data_init(&data, 0, event->hw.last_period);
2934 if (has_branch_stack(event))
2935 data.br_stack = &cpuc->lbr_stack;
2937 if (perf_event_overflow(event, &data, regs))
2938 x86_pmu_stop(event, 0);
2945 * This handler is triggered by the local APIC, so the APIC IRQ handling
2948 static int intel_pmu_handle_irq(struct pt_regs *regs)
2950 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2951 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
2952 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
2959 * Save the PMU state.
2960 * It needs to be restored when leaving the handler.
2962 pmu_enabled = cpuc->enabled;
2964 * In general, the early ACK is only applied for old platforms.
2965 * For big cores starting from Haswell, the late ACK should be
2967 * For small cores after Tremont, we have to do the ACK right
2968 * before re-enabling counters, which is in the middle of the
2971 if (!late_ack && !mid_ack)
2972 apic_write(APIC_LVTPC, APIC_DM_NMI);
2973 intel_bts_disable_local();
2975 __intel_pmu_disable_all(true);
2976 handled = intel_pmu_drain_bts_buffer();
2977 handled += intel_bts_interrupt();
2978 status = intel_pmu_get_status();
2984 intel_pmu_lbr_read();
2985 intel_pmu_ack_status(status);
2986 if (++loops > 100) {
2990 WARN(1, "perfevents: irq loop stuck!\n");
2991 perf_event_print_debug();
2998 handled += handle_pmi_common(regs, status);
3001 * Repeat if there is more work to be done:
3003 status = intel_pmu_get_status();
3009 apic_write(APIC_LVTPC, APIC_DM_NMI);
3010 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3011 cpuc->enabled = pmu_enabled;
3013 __intel_pmu_enable_all(0, true);
3014 intel_bts_enable_local();
3017 * Only unmask the NMI after the overflow counters
3018 * have been reset. This avoids spurious NMIs on
3022 apic_write(APIC_LVTPC, APIC_DM_NMI);
3026 static struct event_constraint *
3027 intel_bts_constraints(struct perf_event *event)
3029 if (unlikely(intel_pmu_has_bts(event)))
3030 return &bts_constraint;
3036 * Note: matches a fake event, like Fixed2.
3038 static struct event_constraint *
3039 intel_vlbr_constraints(struct perf_event *event)
3041 struct event_constraint *c = &vlbr_constraint;
3043 if (unlikely(constraint_match(c, event->hw.config)))
3049 static int intel_alt_er(struct cpu_hw_events *cpuc,
3050 int idx, u64 config)
3052 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3055 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3058 if (idx == EXTRA_REG_RSP_0)
3059 alt_idx = EXTRA_REG_RSP_1;
3061 if (idx == EXTRA_REG_RSP_1)
3062 alt_idx = EXTRA_REG_RSP_0;
3064 if (config & ~extra_regs[alt_idx].valid_mask)
3070 static void intel_fixup_er(struct perf_event *event, int idx)
3072 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3073 event->hw.extra_reg.idx = idx;
3075 if (idx == EXTRA_REG_RSP_0) {
3076 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3077 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3078 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3079 } else if (idx == EXTRA_REG_RSP_1) {
3080 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3081 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3082 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3087 * manage allocation of shared extra MSRs for certain events
3090 * per-cpu: to be shared between the various events on a single PMU
3091 * per-core: per-cpu + shared by HT threads
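 * For example, two events that both need MSR_OFFCORE_RSP_0 but with
 * different config values cannot be programmed at the same time; when
 * possible, one of them is steered to MSR_OFFCORE_RSP_1 by intel_alt_er().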
3093 static struct event_constraint *
3094 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3095 struct perf_event *event,
3096 struct hw_perf_event_extra *reg)
3098 struct event_constraint *c = &emptyconstraint;
3099 struct er_account *era;
3100 unsigned long flags;
3104 * reg->alloc can be set due to existing state, so for fake cpuc we
3105 * need to ignore this, otherwise we might fail to allocate proper fake
3106 * state for this extra reg constraint. Also see the comment below.
3108 if (reg->alloc && !cpuc->is_fake)
3109 return NULL; /* call x86_get_event_constraint() */
3112 era = &cpuc->shared_regs->regs[idx];
3114 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
3115 * passing a fake cpuc
3117 raw_spin_lock_irqsave(&era->lock, flags);
3119 if (!atomic_read(&era->ref) || era->config == reg->config) {
3122 * If it's a fake cpuc -- as per validate_{group,event}() we
3123 * shouldn't touch event state and we can avoid doing so
3124 * since both will only call get_event_constraints() once
3125 * on each event, this avoids the need for reg->alloc.
3127 * Not doing the ER fixup will only result in era->reg being
3128 * wrong, but since we won't actually try and program hardware
3129 * this isn't a problem either.
3131 if (!cpuc->is_fake) {
3132 if (idx != reg->idx)
3133 intel_fixup_er(event, idx);
3136 * x86_schedule_events() can call get_event_constraints()
3137 * multiple times on events in the case of incremental
3138 * scheduling. reg->alloc ensures we only do the ER
3144 /* lock in msr value */
3145 era->config = reg->config;
3146 era->reg = reg->reg;
3149 atomic_inc(&era->ref);
3152 * need to call x86_get_event_constraint()
3153 * to check if associated event has constraints
3157 idx = intel_alt_er(cpuc, idx, reg->config);
3158 if (idx != reg->idx) {
3159 raw_spin_unlock_irqrestore(&era->lock, flags);
3163 raw_spin_unlock_irqrestore(&era->lock, flags);
3169 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3170 struct hw_perf_event_extra *reg)
3172 struct er_account *era;
3175 * Only put constraint if extra reg was actually allocated. Also takes
3176 * care of events which do not use an extra shared reg.
3178 * Also, if this is a fake cpuc we shouldn't touch any event state
3179 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3180 * either since it'll be thrown out.
3182 if (!reg->alloc || cpuc->is_fake)
3185 era = &cpuc->shared_regs->regs[reg->idx];
3187 /* one fewer user */
3188 atomic_dec(&era->ref);
3190 /* allocate again next time */
3194 static struct event_constraint *
3195 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3196 struct perf_event *event)
3198 struct event_constraint *c = NULL, *d;
3199 struct hw_perf_event_extra *xreg, *breg;
3201 xreg = &event->hw.extra_reg;
3202 if (xreg->idx != EXTRA_REG_NONE) {
3203 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3204 if (c == &emptyconstraint)
3207 breg = &event->hw.branch_reg;
3208 if (breg->idx != EXTRA_REG_NONE) {
3209 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3210 if (d == &emptyconstraint) {
3211 __intel_shared_reg_put_constraints(cpuc, xreg);
3218 struct event_constraint *
3219 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3220 struct perf_event *event)
3222 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3223 struct event_constraint *c;
3225 if (event_constraints) {
3226 for_each_event_constraint(c, event_constraints) {
3227 if (constraint_match(c, event->hw.config)) {
3228 event->hw.flags |= c->flags;
3234 return &hybrid_var(cpuc->pmu, unconstrained);
3237 static struct event_constraint *
3238 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3239 struct perf_event *event)
3241 struct event_constraint *c;
3243 c = intel_vlbr_constraints(event);
3247 c = intel_bts_constraints(event);
3251 c = intel_shared_regs_constraints(cpuc, event);
3255 c = intel_pebs_constraints(event);
3259 return x86_get_event_constraints(cpuc, idx, event);
3263 intel_start_scheduling(struct cpu_hw_events *cpuc)
3265 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3266 struct intel_excl_states *xl;
3267 int tid = cpuc->excl_thread_id;
3270 * nothing needed if in group validation mode
3272 if (cpuc->is_fake || !is_ht_workaround_enabled())
3276 * no exclusion needed
3278 if (WARN_ON_ONCE(!excl_cntrs))
3281 xl = &excl_cntrs->states[tid];
3283 xl->sched_started = true;
3285 * Lock the shared state until we are done scheduling,
3286 * i.e. until stop_event_scheduling(); this makes
3287 * scheduling appear as a transaction.
3289 raw_spin_lock(&excl_cntrs->lock);
3292 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3294 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3295 struct event_constraint *c = cpuc->event_constraint[idx];
3296 struct intel_excl_states *xl;
3297 int tid = cpuc->excl_thread_id;
3299 if (cpuc->is_fake || !is_ht_workaround_enabled())
3302 if (WARN_ON_ONCE(!excl_cntrs))
3305 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3308 xl = &excl_cntrs->states[tid];
3310 lockdep_assert_held(&excl_cntrs->lock);
3312 if (c->flags & PERF_X86_EVENT_EXCL)
3313 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3315 xl->state[cntr] = INTEL_EXCL_SHARED;
3319 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3321 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3322 struct intel_excl_states *xl;
3323 int tid = cpuc->excl_thread_id;
3326 * nothing needed if in group validation mode
3328 if (cpuc->is_fake || !is_ht_workaround_enabled())
3331 * no exclusion needed
3333 if (WARN_ON_ONCE(!excl_cntrs))
3336 xl = &excl_cntrs->states[tid];
3338 xl->sched_started = false;
3340 * release shared state lock (acquired in intel_start_scheduling())
3342 raw_spin_unlock(&excl_cntrs->lock);
3345 static struct event_constraint *
3346 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3348 WARN_ON_ONCE(!cpuc->constraint_list);
3350 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3351 struct event_constraint *cx;
3354 * grab pre-allocated constraint entry
3356 cx = &cpuc->constraint_list[idx];
3359 * initialize dynamic constraint
3360 * with static constraint
3365 * mark constraint as dynamic
3367 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3374 static struct event_constraint *
3375 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3376 int idx, struct event_constraint *c)
3378 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3379 struct intel_excl_states *xlo;
3380 int tid = cpuc->excl_thread_id;
3384 * validating a group does not require
3385 * enforcing cross-thread exclusion
3387 if (cpuc->is_fake || !is_ht_workaround_enabled())
3391 * no exclusion needed
3393 if (WARN_ON_ONCE(!excl_cntrs))
3397 * because we modify the constraint, we need
3398 * to make a copy. Static constraints come
3399 * from static const tables.
3401 * only needed when constraint has not yet
3402 * been cloned (marked dynamic)
3404 c = dyn_constraint(cpuc, c, idx);
3407 * From here on, the constraint is dynamic.
3408 * Either it was just allocated above, or it
3409 * was allocated during an earlier invocation
3414 * state of sibling HT
3416 xlo = &excl_cntrs->states[tid ^ 1];
3419 * event requires exclusive counter access
3422 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3423 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3424 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3425 if (!cpuc->n_excl++)
3426 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3430 * Modify static constraint with current dynamic
3433 * EXCLUSIVE: sibling counter measuring exclusive event
3434 * SHARED : sibling counter measuring non-exclusive event
3435 * UNUSED : sibling counter unused
3438 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3440 * If the sibling counter has an exclusive event,
3441 * our corresponding counter cannot be used,
3442 * regardless of our event.
3444 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3445 __clear_bit(i, c->idxmsk);
3450 * If we are measuring an exclusive event while the sibling
3451 * is measuring a non-exclusive one, then the counter cannot
3454 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3455 __clear_bit(i, c->idxmsk);
3462 * If we return an empty mask, then switch
3463 * back to the static empty constraint to avoid
3464 * the cost of freeing it later on.
3467 c = &emptyconstraint;
3474 static struct event_constraint *
3475 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3476 struct perf_event *event)
3478 struct event_constraint *c1, *c2;
3480 c1 = cpuc->event_constraint[idx];
3484 * - static constraint: no change across incremental scheduling calls
3485 * - dynamic constraint: handled by intel_get_excl_constraints()
3487 c2 = __intel_get_event_constraints(cpuc, idx, event);
3489 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3490 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3491 c1->weight = c2->weight;
3495 if (cpuc->excl_cntrs)
3496 return intel_get_excl_constraints(cpuc, event, idx, c2);
3501 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3502 struct perf_event *event)
3504 struct hw_perf_event *hwc = &event->hw;
3505 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3506 int tid = cpuc->excl_thread_id;
3507 struct intel_excl_states *xl;
3510 * nothing needed if in group validation mode
3515 if (WARN_ON_ONCE(!excl_cntrs))
3518 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3519 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3520 if (!--cpuc->n_excl)
3521 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3525 * If event was actually assigned, then mark the counter state as
3528 if (hwc->idx >= 0) {
3529 xl = &excl_cntrs->states[tid];
3532 * put_constraint may be called from x86_schedule_events()
3533 * which already has the lock held so here make locking
3536 if (!xl->sched_started)
3537 raw_spin_lock(&excl_cntrs->lock);
3539 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3541 if (!xl->sched_started)
3542 raw_spin_unlock(&excl_cntrs->lock);
3547 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3548 struct perf_event *event)
3550 struct hw_perf_event_extra *reg;
3552 reg = &event->hw.extra_reg;
3553 if (reg->idx != EXTRA_REG_NONE)
3554 __intel_shared_reg_put_constraints(cpuc, reg);
3556 reg = &event->hw.branch_reg;
3557 if (reg->idx != EXTRA_REG_NONE)
3558 __intel_shared_reg_put_constraints(cpuc, reg);
3561 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3562 struct perf_event *event)
3564 intel_put_shared_regs_event_constraints(cpuc, event);
3567 * If the PMU has exclusive counter restrictions, then
3568 * all events are subject to them and must call the
3569 * put_excl_constraints() routine.
3571 if (cpuc->excl_cntrs)
3572 intel_put_excl_constraints(cpuc, event);
3575 static void intel_pebs_aliases_core2(struct perf_event *event)
3577 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3579 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3580 * (0x003c) so that we can use it with PEBS.
3582 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3583 * PEBS capable. However we can use INST_RETIRED.ANY_P
3584 * (0x00c0), which is a PEBS capable event, to get the same
3587 * INST_RETIRED.ANY_P counts the number of cycles that retire at
3588 * least CNTMASK instructions. By setting CNTMASK to a value (16)
3589 * larger than the maximum number of instructions that can be
3590 * retired per cycle (4) and then inverting the condition, we
3591 * count all cycles that retire 16 or fewer instructions, which
3594 * Thereby we gain a PEBS capable cycle counter.
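 * (Concretely, event=0xc0 with inv=1 and cmask=16 is true on every cycle,
 * since at most 4 instructions can retire per cycle, so the event becomes a
 * PEBS-capable stand-in for a plain cycle count.)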
3596 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3598 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3599 event->hw.config = alt_config;
3603 static void intel_pebs_aliases_snb(struct perf_event *event)
3605 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3607 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3608 * (0x003c) so that we can use it with PEBS.
3610 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3611 * PEBS capable. However we can use UOPS_RETIRED.ALL
3612 * (0x01c2), which is a PEBS capable event, to get the same
3615 * UOPS_RETIRED.ALL counts the number of cycles that retire at
3616 * least CNTMASK micro-ops. By setting CNTMASK to a value (16)
3617 * larger than the maximum number of micro-ops that can be
3618 * retired per cycle (4) and then inverting the condition, we
3619 * count all cycles that retire 16 or fewer micro-ops, which
3622 * Thereby we gain a PEBS capable cycle counter.
3624 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3626 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3627 event->hw.config = alt_config;
3631 static void intel_pebs_aliases_precdist(struct perf_event *event)
3633 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3635 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3636 * (0x003c) so that we can use it with PEBS.
3638 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3639 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3640 * (0x01c0), which is a PEBS capable event, to get the same
3643 * The PREC_DIST event has special support to minimize sample
3644 * shadowing effects. One drawback is that it can only be
3645 * programmed on counter 1, but that seems like an
3646 * acceptable trade-off.
3648 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3650 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3651 event->hw.config = alt_config;
3655 static void intel_pebs_aliases_ivb(struct perf_event *event)
3657 if (event->attr.precise_ip < 3)
3658 return intel_pebs_aliases_snb(event);
3659 return intel_pebs_aliases_precdist(event);
3662 static void intel_pebs_aliases_skl(struct perf_event *event)
3664 if (event->attr.precise_ip < 3)
3665 return intel_pebs_aliases_core2(event);
3666 return intel_pebs_aliases_precdist(event);
3669 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3671 unsigned long flags = x86_pmu.large_pebs_flags;
3673 if (event->attr.use_clockid)
3674 flags &= ~PERF_SAMPLE_TIME;
3675 if (!event->attr.exclude_kernel)
3676 flags &= ~PERF_SAMPLE_REGS_USER;
3677 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3678 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3682 static int intel_pmu_bts_config(struct perf_event *event)
3684 struct perf_event_attr *attr = &event->attr;
3686 if (unlikely(intel_pmu_has_bts(event))) {
3687 /* BTS is not supported by this architecture. */
3688 if (!x86_pmu.bts_active)
3691 /* BTS is currently only allowed for user-mode. */
3692 if (!attr->exclude_kernel)
3695 /* BTS is not allowed for precise events. */
3696 if (attr->precise_ip)
3699 /* disallow bts if conflicting events are present */
3700 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3703 event->destroy = hw_perf_lbr_event_destroy;
3709 static int core_pmu_hw_config(struct perf_event *event)
3711 int ret = x86_pmu_hw_config(event);
3716 return intel_pmu_bts_config(event);
3719 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
3720 ((x86_pmu.num_topdown_events - 1) << 8))
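/*
 * For example, with x86_pmu.num_topdown_events == 4 (e.g. Ice Lake) this spans
 * the four level-1 metrics; with 8 (e.g. Sapphire Rapids) it also spans the
 * level-2 metrics.
 */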
3722 static bool is_available_metric_event(struct perf_event *event)
3724 return is_metric_event(event) &&
3725 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3728 static inline bool is_mem_loads_event(struct perf_event *event)
3730 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3733 static inline bool is_mem_loads_aux_event(struct perf_event *event)
3735 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3738 static inline bool require_mem_loads_aux_event(struct perf_event *event)
3740 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3744 return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3749 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3751 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3753 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3756 static int intel_pmu_hw_config(struct perf_event *event)
3758 int ret = x86_pmu_hw_config(event);
3763 ret = intel_pmu_bts_config(event);
3767 if (event->attr.precise_ip) {
3768 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3771 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3772 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3773 if (!(event->attr.sample_type &
3774 ~intel_pmu_large_pebs_flags(event))) {
3775 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3776 event->attach_state |= PERF_ATTACH_SCHED_CB;
3779 if (x86_pmu.pebs_aliases)
3780 x86_pmu.pebs_aliases(event);
3782 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3783 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3786 if (needs_branch_stack(event)) {
3787 ret = intel_pmu_setup_lbr_filter(event);
3790 event->attach_state |= PERF_ATTACH_SCHED_CB;
3793 * BTS is set up earlier in this path, so don't account twice
3795 if (!unlikely(intel_pmu_has_bts(event))) {
3796 /* disallow lbr if conflicting events are present */
3797 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3800 event->destroy = hw_perf_lbr_event_destroy;
3804 if (event->attr.aux_output) {
3805 if (!event->attr.precise_ip)
3808 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3811 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
3812 (event->attr.type == PERF_TYPE_HW_CACHE))
3816 * Config Topdown slots and metric events
3818 * The slots event on Fixed Counter 3 can support sampling,
3819 * which will be handled normally in x86_perf_event_update().
3821 * Metric events don't support sampling and require being paired
3822 * with a slots event as group leader. When the slots event
3823 * is used in a metrics group, it too cannot support sampling.
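 * For example, a valid configuration is a group led by the slots event, such
 * as the one the perf tool sets up for '{slots,topdown-retiring}'.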
3825 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
3826 if (event->attr.config1 || event->attr.config2)
3830 * The TopDown metrics events and slots event don't
3831 * support any filters.
3833 if (event->attr.config & X86_ALL_EVENT_FLAGS)
3836 if (is_available_metric_event(event)) {
3837 struct perf_event *leader = event->group_leader;
3839 /* The metric events don't support sampling. */
3840 if (is_sampling_event(event))
3843 /* The metric events require a slots group leader. */
3844 if (!is_slots_event(leader))
3848 * The leader/SLOTS must not be a sampling event for
3849 * metric use; hardware requires it starts at 0 when used
3850 * in conjunction with MSR_PERF_METRICS.
3852 if (is_sampling_event(leader))
3855 event->event_caps |= PERF_EV_CAP_SIBLING;
3857 * Only once we have a METRICs sibling do we
3858 * need TopDown magic.
3860 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3861 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3866 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
3867 * doesn't function quite right. As a work-around it needs to always be
3868 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
3869 * The actual count of this second event is irrelevant; it just needs
3870 * to be active to make the first event function correctly.
3872 * In a group, the auxiliary event must be in front of the load latency
3873 * event. This rule exists to simplify the implementation of the check,
3874 * because perf cannot see a complete group at this point.
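 * For example, a group ordered as '{mem-loads-aux,mem-loads}' (auxiliary
 * event first) satisfies this rule.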
3876 if (require_mem_loads_aux_event(event) &&
3877 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
3878 is_mem_loads_event(event)) {
3879 struct perf_event *leader = event->group_leader;
3880 struct perf_event *sibling = NULL;
3882 if (!is_mem_loads_aux_event(leader)) {
3883 for_each_sibling_event(sibling, leader) {
3884 if (is_mem_loads_aux_event(sibling))
3887 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
3892 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3895 if (x86_pmu.version < 3)
3898 ret = perf_allow_cpu(&event->attr);
3902 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3907 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3909 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3910 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3911 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
3913 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3914 arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3915 arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3916 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3917 arr[0].guest &= ~cpuc->pebs_enabled;
3919 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3922 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3924 * If a PMU counter has PEBS enabled, it is not enough to
3925 * disable the counter on guest entry, since a PEBS memory
3926 * write can overshoot guest entry and corrupt guest
3927 * memory. Disabling PEBS solves the problem.
3929 * Don't do this if the CPU already enforces it.
3931 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3932 arr[1].host = cpuc->pebs_enabled;
3940 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3942 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3943 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3946 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3947 struct perf_event *event = cpuc->events[idx];
3949 arr[idx].msr = x86_pmu_config_addr(idx);
3950 arr[idx].host = arr[idx].guest = 0;
3952 if (!test_bit(idx, cpuc->active_mask))
3955 arr[idx].host = arr[idx].guest =
3956 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3958 if (event->attr.exclude_host)
3959 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3960 else if (event->attr.exclude_guest)
3961 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3964 *nr = x86_pmu.num_counters;
3968 static void core_pmu_enable_event(struct perf_event *event)
3970 if (!event->attr.exclude_host)
3971 x86_pmu_enable_event(event);
3974 static void core_pmu_enable_all(int added)
3976 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3979 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3980 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3982 if (!test_bit(idx, cpuc->active_mask) ||
3983 cpuc->events[idx]->attr.exclude_host)
3986 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3990 static int hsw_hw_config(struct perf_event *event)
3992 int ret = intel_pmu_hw_config(event);
3996 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3998 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4001 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4002 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4005 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4006 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4007 event->attr.precise_ip > 0))
4010 if (event_is_checkpointed(event)) {
4012 * Sampling of checkpointed events can cause situations where
4013 * the CPU constantly aborts because of an overflow, which is
4014 * then checkpointed back and ignored. Forbid checkpointing
4017 * But still allow a long sampling period, so that perf stat
4020 if (event->attr.sample_period > 0 &&
4021 event->attr.sample_period < 0x7fffffff)
4027 static struct event_constraint counter0_constraint =
4028 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4030 static struct event_constraint counter2_constraint =
4031 EVENT_CONSTRAINT(0, 0x4, 0);
4033 static struct event_constraint fixed0_constraint =
4034 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
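/* GP counter 0 (bit 0) or fixed counter 0 (bit 32, INTEL_PMC_IDX_FIXED) */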
4036 static struct event_constraint fixed0_counter0_constraint =
4037 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4039 static struct event_constraint *
4040 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4041 struct perf_event *event)
4043 struct event_constraint *c;
4045 c = intel_get_event_constraints(cpuc, idx, event);
4047 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4048 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4049 if (c->idxmsk64 & (1U << 2))
4050 return &counter2_constraint;
4051 return &emptyconstraint;
4057 static struct event_constraint *
4058 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4059 struct perf_event *event)
4062 * Fixed counter 0 has less skid.
4063 * Force instruction:ppp in Fixed counter 0
4065 if ((event->attr.precise_ip == 3) &&
4066 constraint_match(&fixed0_constraint, event->hw.config))
4067 return &fixed0_constraint;
4069 return hsw_get_event_constraints(cpuc, idx, event);
4072 static struct event_constraint *
4073 spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4074 struct perf_event *event)
4076 struct event_constraint *c;
4078 c = icl_get_event_constraints(cpuc, idx, event);
4081 * The :ppp indicates the Precise Distribution (PDist) facility, which
4082 * is only supported on GP counter 0. If a :ppp event cannot be
4083 * scheduled on GP counter 0, error out.
4084 * Exception: Instruction PDIR is only available on fixed counter 0.
4086 if ((event->attr.precise_ip == 3) &&
4087 !constraint_match(&fixed0_constraint, event->hw.config)) {
4088 if (c->idxmsk64 & BIT_ULL(0))
4089 return &counter0_constraint;
4091 return &emptyconstraint;
4097 static struct event_constraint *
4098 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4099 struct perf_event *event)
4101 struct event_constraint *c;
4103 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4104 if (event->attr.precise_ip == 3)
4105 return &counter0_constraint;
4107 c = intel_get_event_constraints(cpuc, idx, event);
4112 static struct event_constraint *
4113 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4114 struct perf_event *event)
4116 struct event_constraint *c;
4119 * :ppp means to do reduced skid PEBS,
4120 * which is available on PMC0 and fixed counter 0.
4122 if (event->attr.precise_ip == 3) {
4123 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4124 if (constraint_match(&fixed0_constraint, event->hw.config))
4125 return &fixed0_counter0_constraint;
4127 return &counter0_constraint;
4130 c = intel_get_event_constraints(cpuc, idx, event);
4135 static bool allow_tsx_force_abort = true;
4137 static struct event_constraint *
4138 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4139 struct perf_event *event)
4141 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4144 * Without TFA we must not use PMC3.
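 * dyn_constraint() hands back a per-CPU copy of the constraint, so clearing
 * PMC3 from the index mask below does not modify the shared static tables.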
4146 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4147 c = dyn_constraint(cpuc, c, idx);
4148 c->idxmsk64 &= ~(1ULL << 3);
4155 static struct event_constraint *
4156 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4157 struct perf_event *event)
4159 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4161 if (pmu->cpu_type == hybrid_big)
4162 return spr_get_event_constraints(cpuc, idx, event);
4163 else if (pmu->cpu_type == hybrid_small)
4164 return tnt_get_event_constraints(cpuc, idx, event);
4167 return &emptyconstraint;
4170 static int adl_hw_config(struct perf_event *event)
4172 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4174 if (pmu->cpu_type == hybrid_big)
4175 return hsw_hw_config(event);
4176 else if (pmu->cpu_type == hybrid_small)
4177 return intel_pmu_hw_config(event);
4183 static u8 adl_get_hybrid_cpu_type(void)
4191 * The INST_RETIRED.ALL period always needs to have the lowest 6 bits cleared
4192 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4193 * the two to enforce a minimum period of 128 (the smallest value that has bits
4194 * 0-5 cleared and >= 100).
4196 * Because of how the code in x86_perf_event_set_period() works, the truncation
4197 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4198 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4200 * Therefore the effective (average) period matches the requested period,
4201 * despite coarser hardware granularity.
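 * For example, a requested period of 200 is programmed as 192, with the
 * 'lost' 8 carried in period_left and added back later; a requested period
 * below 128 is simply raised to 128.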
4203 static u64 bdw_limit_period(struct perf_event *event, u64 left)
4205 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4206 X86_CONFIG(.event=0xc0, .umask=0x01)) {
4214 static u64 nhm_limit_period(struct perf_event *event, u64 left)
4216 return max(left, 32ULL);
4219 static u64 spr_limit_period(struct perf_event *event, u64 left)
4221 if (event->attr.precise_ip == 3)
4222 return max(left, 128ULL);
4227 PMU_FORMAT_ATTR(event, "config:0-7" );
4228 PMU_FORMAT_ATTR(umask, "config:8-15" );
4229 PMU_FORMAT_ATTR(edge, "config:18" );
4230 PMU_FORMAT_ATTR(pc, "config:19" );
4231 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4232 PMU_FORMAT_ATTR(inv, "config:23" );
4233 PMU_FORMAT_ATTR(cmask, "config:24-31" );
4234 PMU_FORMAT_ATTR(in_tx, "config:32");
4235 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
4237 static struct attribute *intel_arch_formats_attr[] = {
4238 &format_attr_event.attr,
4239 &format_attr_umask.attr,
4240 &format_attr_edge.attr,
4241 &format_attr_pc.attr,
4242 &format_attr_inv.attr,
4243 &format_attr_cmask.attr,
4247 ssize_t intel_event_sysfs_show(char *page, u64 config)
4249 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4251 return x86_event_sysfs_show(page, config, event);
4254 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4256 struct intel_shared_regs *regs;
4259 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4260 GFP_KERNEL, cpu_to_node(cpu));
4263 * initialize the locks to keep lockdep happy
4265 for (i = 0; i < EXTRA_REG_MAX; i++)
4266 raw_spin_lock_init(&regs->regs[i].lock);
4273 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4275 struct intel_excl_cntrs *c;
4277 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4278 GFP_KERNEL, cpu_to_node(cpu));
4280 raw_spin_lock_init(&c->lock);
4287 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4289 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4291 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4292 cpuc->shared_regs = allocate_shared_regs(cpu);
4293 if (!cpuc->shared_regs)
4297 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4298 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4300 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4301 if (!cpuc->constraint_list)
4302 goto err_shared_regs;
4305 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4306 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4307 if (!cpuc->excl_cntrs)
4308 goto err_constraint_list;
4310 cpuc->excl_thread_id = 0;
4315 err_constraint_list:
4316 kfree(cpuc->constraint_list);
4317 cpuc->constraint_list = NULL;
4320 kfree(cpuc->shared_regs);
4321 cpuc->shared_regs = NULL;
4327 static int intel_pmu_cpu_prepare(int cpu)
4329 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4332 static void flip_smm_bit(void *data)
4334 unsigned long set = *(unsigned long *)data;
4337 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4338 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4340 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4341 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4345 static bool init_hybrid_pmu(int cpu)
4347 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4348 u8 cpu_type = get_this_hybrid_cpu_type();
4349 struct x86_hybrid_pmu *pmu = NULL;
4352 if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4353 cpu_type = x86_pmu.get_hybrid_cpu_type();
4355 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4356 if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4357 pmu = &x86_pmu.hybrid_pmu[i];
4361 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4366 /* Only check and dump the PMU information for the first CPU */
4367 if (!cpumask_empty(&pmu->supported_cpus))
4370 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4373 pr_info("%s PMU driver: ", pmu->name);
4375 if (pmu->intel_cap.pebs_output_pt_available)
4376 pr_cont("PEBS-via-PT ");
4380 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4384 cpumask_set_cpu(cpu, &pmu->supported_cpus);
4385 cpuc->pmu = &pmu->pmu;
4387 x86_pmu_update_cpu_context(&pmu->pmu, cpu);
4392 static void intel_pmu_cpu_starting(int cpu)
4394 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4395 int core_id = topology_core_id(cpu);
4398 if (is_hybrid() && !init_hybrid_pmu(cpu))
4401 init_debug_store_on_cpu(cpu);
4403 * Deal with CPUs that don't clear their LBRs on power-up.
4405 intel_pmu_lbr_reset();
4407 cpuc->lbr_sel = NULL;
4409 if (x86_pmu.flags & PMU_FL_TFA) {
4410 WARN_ON_ONCE(cpuc->tfa_shadow);
4411 cpuc->tfa_shadow = ~0ULL;
4412 intel_set_tfa(cpuc, false);
4415 if (x86_pmu.version > 1)
4416 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4419 * Disable perf metrics if any added CPU doesn't support it.
4421 * Turn off the check for a hybrid architecture, because the
4422 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
4423 * architectural features. Perf metrics is a model-specific
4424 * feature for now. The corresponding bit should always be 0 on
4425 * a hybrid platform, e.g., Alder Lake.
4427 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4428 union perf_capabilities perf_cap;
4430 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4431 if (!perf_cap.perf_metrics) {
4432 x86_pmu.intel_cap.perf_metrics = 0;
4433 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4437 if (!cpuc->shared_regs)
4440 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4441 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4442 struct intel_shared_regs *pc;
4444 pc = per_cpu(cpu_hw_events, i).shared_regs;
4445 if (pc && pc->core_id == core_id) {
4446 cpuc->kfree_on_online[0] = cpuc->shared_regs;
4447 cpuc->shared_regs = pc;
4451 cpuc->shared_regs->core_id = core_id;
4452 cpuc->shared_regs->refcnt++;
4455 if (x86_pmu.lbr_sel_map)
4456 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4458 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4459 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4460 struct cpu_hw_events *sibling;
4461 struct intel_excl_cntrs *c;
4463 sibling = &per_cpu(cpu_hw_events, i);
4464 c = sibling->excl_cntrs;
4465 if (c && c->core_id == core_id) {
4466 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4467 cpuc->excl_cntrs = c;
4468 if (!sibling->excl_thread_id)
4469 cpuc->excl_thread_id = 1;
4473 cpuc->excl_cntrs->core_id = core_id;
4474 cpuc->excl_cntrs->refcnt++;
4478 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4480 struct intel_excl_cntrs *c;
4482 c = cpuc->excl_cntrs;
4484 if (c->core_id == -1 || --c->refcnt == 0)
4486 cpuc->excl_cntrs = NULL;
4489 kfree(cpuc->constraint_list);
4490 cpuc->constraint_list = NULL;
4493 static void intel_pmu_cpu_dying(int cpu)
4495 fini_debug_store_on_cpu(cpu);
4498 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4500 struct intel_shared_regs *pc;
4502 pc = cpuc->shared_regs;
4504 if (pc->core_id == -1 || --pc->refcnt == 0)
4506 cpuc->shared_regs = NULL;
4509 free_excl_cntrs(cpuc);
4512 static void intel_pmu_cpu_dead(int cpu)
4514 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4516 intel_cpuc_finish(cpuc);
4518 if (is_hybrid() && cpuc->pmu)
4519 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
4522 static void intel_pmu_sched_task(struct perf_event_context *ctx,
4525 intel_pmu_pebs_sched_task(ctx, sched_in);
4526 intel_pmu_lbr_sched_task(ctx, sched_in);
4529 static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
4530 struct perf_event_context *next)
4532 intel_pmu_lbr_swap_task_ctx(prev, next);
4535 static int intel_pmu_check_period(struct perf_event *event, u64 value)
4537 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4540 static int intel_pmu_aux_output_match(struct perf_event *event)
4542 if (!x86_pmu.intel_cap.pebs_output_pt_available)
4545 return is_intel_pt_event(event);
4548 static int intel_pmu_filter_match(struct perf_event *event)
4550 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4551 unsigned int cpu = smp_processor_id();
4553 return cpumask_test_cpu(cpu, &pmu->supported_cpus);
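/*
 * On hybrid parts each event belongs to one hybrid PMU, so e.g.
 * "perf stat -a -e cpu_core/cycles/,cpu_atom/cycles/" (assuming the
 * cpu_core/cpu_atom PMU names registered below) counts each event only on
 * the CPUs in that PMU's supported_cpus mask; filter_match keeps it from
 * counting anywhere else.
 */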
4556 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
4558 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
4560 PMU_FORMAT_ATTR(frontend, "config1:0-23");
4562 static struct attribute *intel_arch3_formats_attr[] = {
4563 &format_attr_event.attr,
4564 &format_attr_umask.attr,
4565 &format_attr_edge.attr,
4566 &format_attr_pc.attr,
4567 &format_attr_any.attr,
4568 &format_attr_inv.attr,
4569 &format_attr_cmask.attr,
4573 static struct attribute *hsw_format_attr[] = {
4574 &format_attr_in_tx.attr,
4575 &format_attr_in_tx_cp.attr,
4576 &format_attr_offcore_rsp.attr,
4577 &format_attr_ldlat.attr,
4581 static struct attribute *nhm_format_attr[] = {
4582 &format_attr_offcore_rsp.attr,
4583 &format_attr_ldlat.attr,
4587 static struct attribute *slm_format_attr[] = {
4588 &format_attr_offcore_rsp.attr,
4592 static struct attribute *skl_format_attr[] = {
4593 &format_attr_frontend.attr,
4597 static __initconst const struct x86_pmu core_pmu = {
4599 .handle_irq = x86_pmu_handle_irq,
4600 .disable_all = x86_pmu_disable_all,
4601 .enable_all = core_pmu_enable_all,
4602 .enable = core_pmu_enable_event,
4603 .disable = x86_pmu_disable_event,
4604 .hw_config = core_pmu_hw_config,
4605 .schedule_events = x86_schedule_events,
4606 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4607 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4608 .event_map = intel_pmu_event_map,
4609 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4611 .large_pebs_flags = LARGE_PEBS_FLAGS,
4614 * Intel PMCs cannot be accessed sanely above 32-bit width,
4615 * so we install an artificial 1<<31 period regardless of
4616 * the generic event period:
4618 .max_period = (1ULL<<31) - 1,
4619 .get_event_constraints = intel_get_event_constraints,
4620 .put_event_constraints = intel_put_event_constraints,
4621 .event_constraints = intel_core_event_constraints,
4622 .guest_get_msrs = core_guest_get_msrs,
4623 .format_attrs = intel_arch_formats_attr,
4624 .events_sysfs_show = intel_event_sysfs_show,
4627 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
4628 * together with PMU version 1 and thus be using core_pmu with
4629 * shared_regs. We need the following callbacks here to allocate
4632 .cpu_prepare = intel_pmu_cpu_prepare,
4633 .cpu_starting = intel_pmu_cpu_starting,
4634 .cpu_dying = intel_pmu_cpu_dying,
4635 .cpu_dead = intel_pmu_cpu_dead,
4637 .check_period = intel_pmu_check_period,
4639 .lbr_reset = intel_pmu_lbr_reset_64,
4640 .lbr_read = intel_pmu_lbr_read_64,
4641 .lbr_save = intel_pmu_lbr_save,
4642 .lbr_restore = intel_pmu_lbr_restore,
4645 static __initconst const struct x86_pmu intel_pmu = {
4647 .handle_irq = intel_pmu_handle_irq,
4648 .disable_all = intel_pmu_disable_all,
4649 .enable_all = intel_pmu_enable_all,
4650 .enable = intel_pmu_enable_event,
4651 .disable = intel_pmu_disable_event,
4652 .add = intel_pmu_add_event,
4653 .del = intel_pmu_del_event,
4654 .read = intel_pmu_read_event,
4655 .hw_config = intel_pmu_hw_config,
4656 .schedule_events = x86_schedule_events,
4657 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4658 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4659 .event_map = intel_pmu_event_map,
4660 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4662 .large_pebs_flags = LARGE_PEBS_FLAGS,
4664 * Intel PMCs cannot be accessed sanely above 32 bit width,
4665 * so we install an artificial 1<<31 period regardless of
4666 * the generic event period:
4668 .max_period = (1ULL << 31) - 1,
4669 .get_event_constraints = intel_get_event_constraints,
4670 .put_event_constraints = intel_put_event_constraints,
4671 .pebs_aliases = intel_pebs_aliases_core2,
4673 .format_attrs = intel_arch3_formats_attr,
4674 .events_sysfs_show = intel_event_sysfs_show,
4676 .cpu_prepare = intel_pmu_cpu_prepare,
4677 .cpu_starting = intel_pmu_cpu_starting,
4678 .cpu_dying = intel_pmu_cpu_dying,
4679 .cpu_dead = intel_pmu_cpu_dead,
4681 .guest_get_msrs = intel_guest_get_msrs,
4682 .sched_task = intel_pmu_sched_task,
4683 .swap_task_ctx = intel_pmu_swap_task_ctx,
4685 .check_period = intel_pmu_check_period,
4687 .aux_output_match = intel_pmu_aux_output_match,
4689 .lbr_reset = intel_pmu_lbr_reset_64,
4690 .lbr_read = intel_pmu_lbr_read_64,
4691 .lbr_save = intel_pmu_lbr_save,
4692 .lbr_restore = intel_pmu_lbr_restore,
4695 static __init void intel_clovertown_quirk(void)
4698 * PEBS is unreliable due to:
4700 * AJ67 - PEBS may experience CPL leaks
4701 * AJ68 - PEBS PMI may be delayed by one event
4702 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
4703 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
4705 * AJ67 could be worked around by restricting the OS/USR flags.
4706 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
4708 * AJ106 could possibly be worked around by not allowing LBR
4709 * usage from PEBS, including the fixup.
4710 * AJ68 could possibly be worked around by always programming
4711 * a pebs_event_reset[0] value and coping with the lost events.
4713 * But taken together it might just make sense to not enable PEBS on
4716 pr_warn("PEBS disabled due to CPU errata\n");
4718 x86_pmu.pebs_constraints = NULL;
4721 static const struct x86_cpu_desc isolation_ucodes[] = {
4722 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
4723 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
4724 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
4725 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
4726 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
4727 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
4728 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
4729 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
4730 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
4731 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
4732 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
4733 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
4734 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
4735 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
4736 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
4737 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
4738 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
4739 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
4740 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
4741 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
4742 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
4743 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
4744 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
4745 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
4746 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
4747 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
4748 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
4749 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
4753 static void intel_check_pebs_isolation(void)
4755 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4758 static __init void intel_pebs_isolation_quirk(void)
4760 WARN_ON_ONCE(x86_pmu.check_microcode);
4761 x86_pmu.check_microcode = intel_check_pebs_isolation;
4762 intel_check_pebs_isolation();
4765 static const struct x86_cpu_desc pebs_ucodes[] = {
4766 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
4767 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
4768 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
4772 static bool intel_snb_pebs_broken(void)
4774 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4777 static void intel_snb_check_microcode(void)
4779 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4783 * Serialized by the microcode lock.
4785 if (x86_pmu.pebs_broken) {
4786 pr_info("PEBS enabled due to microcode update\n");
4787 x86_pmu.pebs_broken = 0;
4789 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4790 x86_pmu.pebs_broken = 1;
4794 static bool is_lbr_from(unsigned long msr)
4796 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4798 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4802 * Under certain circumstances, accessing certain MSRs may cause #GP.
4803 * This function tests whether the input MSR can be safely accessed.
4805 static bool check_msr(unsigned long msr, u64 mask)
4807 u64 val_old, val_new, val_tmp;
4810 * Disable the check for real HW, so we don't
4811 * mess with potentially enabled registers:
4813 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4817 * Read the current value, change it and read it back to see if it
4818 * matches, this is needed to detect certain hardware emulators
4819 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
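 *
 * Illustrative example: such an emulator returns val_old == 0; we then write
 * val_old ^ mask and read back 0 again, and the mismatch below flags the MSR
 * as unusable.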
4821 if (rdmsrl_safe(msr, &val_old))
4825 * Only change the bits which can be updated by wrmsrl.
4827 val_tmp = val_old ^ mask;
4829 if (is_lbr_from(msr))
4830 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4832 if (wrmsrl_safe(msr, val_tmp) ||
4833 rdmsrl_safe(msr, &val_new))
4837 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4838 * should equal rdmsrl()'s even with the quirk.
4840 if (val_new != val_tmp)
4843 if (is_lbr_from(msr))
4844 val_old = lbr_from_signext_quirk_wr(val_old);
4846 /* At this point the MSR is known to be safe to access.
4847 * Restore the old value and return.
4849 wrmsrl(msr, val_old);
4854 static __init void intel_sandybridge_quirk(void)
4856 x86_pmu.check_microcode = intel_snb_check_microcode;
4858 intel_snb_check_microcode();
4862 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4863 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4864 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4865 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4866 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4867 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4868 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4869 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
4872 static __init void intel_arch_events_quirk(void)
4876 /* disable events that are reported as not present by CPUID */
4877 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4878 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4879 pr_warn("CPUID marked event: \'%s\' unavailable\n",
4880 intel_arch_events_map[bit].name);
4884 static __init void intel_nehalem_quirk(void)
4886 union cpuid10_ebx ebx;
4888 ebx.full = x86_pmu.events_maskl;
4889 if (ebx.split.no_branch_misses_retired) {
4891 * Erratum AAJ80 detected, we work it around by using
4892 * the BR_MISP_EXEC.ANY event. This will over-count
4893 * branch-misses, but it's still much better than the
4894 * architectural event which is often completely bogus:
4896 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4897 ebx.split.no_branch_misses_retired = 0;
4898 x86_pmu.events_maskl = ebx.full;
4899 pr_info("CPU erratum AAJ80 worked around\n");
4904 * enable software workaround for errata:
4909 * Only needed when HT is enabled. However, detecting
4910 * whether HT is enabled is difficult (model specific). So instead,
4911 * we enable the workaround during early boot and verify whether
4912 * it is needed in a later initcall phase, once we have valid
4913 * topology information to check if HT is actually enabled.
4915 static __init void intel_ht_bug(void)
4917 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4919 x86_pmu.start_scheduling = intel_start_scheduling;
4920 x86_pmu.commit_scheduling = intel_commit_scheduling;
4921 x86_pmu.stop_scheduling = intel_stop_scheduling;
4924 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4925 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
4927 /* Haswell special events */
4928 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4929 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4930 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4931 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4932 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4933 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4934 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4935 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4936 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4937 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4938 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4939 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
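/*
 * The aliases above appear under the PMU's events/ directory in sysfs, so
 * (assuming the usual tooling) "perf stat -e tx-start,tx-abort,cycles-ct ..."
 * resolves to the raw encodings given in the strings.
 */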
4941 static struct attribute *hsw_events_attrs[] = {
4942 EVENT_PTR(td_slots_issued),
4943 EVENT_PTR(td_slots_retired),
4944 EVENT_PTR(td_fetch_bubbles),
4945 EVENT_PTR(td_total_slots),
4946 EVENT_PTR(td_total_slots_scale),
4947 EVENT_PTR(td_recovery_bubbles),
4948 EVENT_PTR(td_recovery_bubbles_scale),
4952 static struct attribute *hsw_mem_events_attrs[] = {
4953 EVENT_PTR(mem_ld_hsw),
4954 EVENT_PTR(mem_st_hsw),
4958 static struct attribute *hsw_tsx_events_attrs[] = {
4959 EVENT_PTR(tx_start),
4960 EVENT_PTR(tx_commit),
4961 EVENT_PTR(tx_abort),
4962 EVENT_PTR(tx_capacity),
4963 EVENT_PTR(tx_conflict),
4964 EVENT_PTR(el_start),
4965 EVENT_PTR(el_commit),
4966 EVENT_PTR(el_abort),
4967 EVENT_PTR(el_capacity),
4968 EVENT_PTR(el_conflict),
4969 EVENT_PTR(cycles_t),
4970 EVENT_PTR(cycles_ct),
4974 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
4975 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4976 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
4977 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4979 static struct attribute *icl_events_attrs[] = {
4980 EVENT_PTR(mem_ld_hsw),
4981 EVENT_PTR(mem_st_hsw),
4985 static struct attribute *icl_td_events_attrs[] = {
4987 EVENT_PTR(td_retiring),
4988 EVENT_PTR(td_bad_spec),
4989 EVENT_PTR(td_fe_bound),
4990 EVENT_PTR(td_be_bound),
4994 static struct attribute *icl_tsx_events_attrs[] = {
4995 EVENT_PTR(tx_start),
4996 EVENT_PTR(tx_abort),
4997 EVENT_PTR(tx_commit),
4998 EVENT_PTR(tx_capacity_read),
4999 EVENT_PTR(tx_capacity_write),
5000 EVENT_PTR(tx_conflict),
5001 EVENT_PTR(el_start),
5002 EVENT_PTR(el_abort),
5003 EVENT_PTR(el_commit),
5004 EVENT_PTR(el_capacity_read),
5005 EVENT_PTR(el_capacity_write),
5006 EVENT_PTR(el_conflict),
5007 EVENT_PTR(cycles_t),
5008 EVENT_PTR(cycles_ct),
5013 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5014 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5016 static struct attribute *spr_events_attrs[] = {
5017 EVENT_PTR(mem_ld_hsw),
5018 EVENT_PTR(mem_st_spr),
5019 EVENT_PTR(mem_ld_aux),
5023 static struct attribute *spr_td_events_attrs[] = {
5025 EVENT_PTR(td_retiring),
5026 EVENT_PTR(td_bad_spec),
5027 EVENT_PTR(td_fe_bound),
5028 EVENT_PTR(td_be_bound),
5029 EVENT_PTR(td_heavy_ops),
5030 EVENT_PTR(td_br_mispredict),
5031 EVENT_PTR(td_fetch_lat),
5032 EVENT_PTR(td_mem_bound),
5036 static struct attribute *spr_tsx_events_attrs[] = {
5037 EVENT_PTR(tx_start),
5038 EVENT_PTR(tx_abort),
5039 EVENT_PTR(tx_commit),
5040 EVENT_PTR(tx_capacity_read),
5041 EVENT_PTR(tx_capacity_write),
5042 EVENT_PTR(tx_conflict),
5043 EVENT_PTR(cycles_t),
5044 EVENT_PTR(cycles_ct),
5048 static ssize_t freeze_on_smi_show(struct device *cdev,
5049 struct device_attribute *attr,
5052 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5055 static DEFINE_MUTEX(freeze_on_smi_mutex);
5057 static ssize_t freeze_on_smi_store(struct device *cdev,
5058 struct device_attribute *attr,
5059 const char *buf, size_t count)
5064 ret = kstrtoul(buf, 0, &val);
5071 mutex_lock(&freeze_on_smi_mutex);
5073 if (x86_pmu.attr_freeze_on_smi == val)
5076 x86_pmu.attr_freeze_on_smi = val;
5079 on_each_cpu(flip_smm_bit, &val, 1);
5082 mutex_unlock(&freeze_on_smi_mutex);
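/*
 * freeze_on_smi is a writable sysfs attribute (typically
 * /sys/devices/cpu/freeze_on_smi).  Writing 1, e.g. with
 * "echo 1 > /sys/devices/cpu/freeze_on_smi", propagates
 * DEBUGCTLMSR_FREEZE_IN_SMM to every CPU via flip_smm_bit() so the counters
 * stop counting while the CPU is in SMM.
 */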
5087 static void update_tfa_sched(void *ignored)
5089 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5092 * Check whether PMC3 is in use and, if so, force a reschedule
5093 * for all event types in all contexts.
5095 if (test_bit(3, cpuc->active_mask))
5096 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5099 static ssize_t show_sysctl_tfa(struct device *cdev,
5100 struct device_attribute *attr,
5103 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5106 static ssize_t set_sysctl_tfa(struct device *cdev,
5107 struct device_attribute *attr,
5108 const char *buf, size_t count)
5113 ret = kstrtobool(buf, &val);
5118 if (val == allow_tsx_force_abort)
5121 allow_tsx_force_abort = val;
5124 on_each_cpu(update_tfa_sched, NULL, 1);
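/*
 * allow_tsx_force_abort is exposed as a writable sysfs attribute on TFA
 * systems (typically /sys/devices/cpu/allow_tsx_force_abort).  Writing 0
 * keeps perf off PMC3 so transactions are not force-aborted; writing 1
 * (the default) lets perf use PMC3 at the cost of aborting TSX
 * transactions.  The on_each_cpu() call above reschedules any CPU that is
 * currently using PMC3 so the new policy takes effect immediately.
 */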
5131 static DEVICE_ATTR_RW(freeze_on_smi);
5133 static ssize_t branches_show(struct device *cdev,
5134 struct device_attribute *attr,
5137 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5140 static DEVICE_ATTR_RO(branches);
5142 static struct attribute *lbr_attrs[] = {
5143 &dev_attr_branches.attr,
5147 static char pmu_name_str[30];
5149 static ssize_t pmu_name_show(struct device *cdev,
5150 struct device_attribute *attr,
5153 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
5156 static DEVICE_ATTR_RO(pmu_name);
5158 static struct attribute *intel_pmu_caps_attrs[] = {
5159 &dev_attr_pmu_name.attr,
5163 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5167 static struct attribute *intel_pmu_attrs[] = {
5168 &dev_attr_freeze_on_smi.attr,
5169 &dev_attr_allow_tsx_force_abort.attr,
5174 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5176 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5180 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5182 return x86_pmu.pebs ? attr->mode : 0;
5186 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5188 return x86_pmu.lbr_nr ? attr->mode : 0;
5192 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5194 return x86_pmu.version >= 2 ? attr->mode : 0;
5198 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5200 if (attr == &dev_attr_allow_tsx_force_abort.attr)
5201 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5206 static struct attribute_group group_events_td = {
5210 static struct attribute_group group_events_mem = {
5212 .is_visible = pebs_is_visible,
5215 static struct attribute_group group_events_tsx = {
5217 .is_visible = tsx_is_visible,
5220 static struct attribute_group group_caps_gen = {
5222 .attrs = intel_pmu_caps_attrs,
5225 static struct attribute_group group_caps_lbr = {
5228 .is_visible = lbr_is_visible,
5231 static struct attribute_group group_format_extra = {
5233 .is_visible = exra_is_visible,
5236 static struct attribute_group group_format_extra_skl = {
5238 .is_visible = exra_is_visible,
5241 static struct attribute_group group_default = {
5242 .attrs = intel_pmu_attrs,
5243 .is_visible = default_is_visible,
5246 static const struct attribute_group *attr_update[] = {
5252 &group_format_extra,
5253 &group_format_extra_skl,
5258 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
5259 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
5260 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
5261 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
5262 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
5263 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
5264 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
5265 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
5266 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
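/*
 * For attributes marked hybrid_big_small the event string holds one
 * ';'-separated encoding per hybrid PMU the attribute applies to; the sysfs
 * show routine returns only the encoding belonging to the PMU being read,
 * so cpu_core and cpu_atom each see a single, correct event definition.
 */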
5268 static struct attribute *adl_hybrid_events_attrs[] = {
5269 EVENT_PTR(slots_adl),
5270 EVENT_PTR(td_retiring_adl),
5271 EVENT_PTR(td_bad_spec_adl),
5272 EVENT_PTR(td_fe_bound_adl),
5273 EVENT_PTR(td_be_bound_adl),
5274 EVENT_PTR(td_heavy_ops_adl),
5275 EVENT_PTR(td_br_mis_adl),
5276 EVENT_PTR(td_fetch_lat_adl),
5277 EVENT_PTR(td_mem_bound_adl),
5281 /* Must be in IDX order */
5282 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
5283 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
5284 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
5286 static struct attribute *adl_hybrid_mem_attrs[] = {
5287 EVENT_PTR(mem_ld_adl),
5288 EVENT_PTR(mem_st_adl),
5289 EVENT_PTR(mem_ld_aux_adl),
5293 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
5294 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
5295 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
5296 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
5297 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
5298 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
5299 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
5300 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
5302 static struct attribute *adl_hybrid_tsx_attrs[] = {
5303 EVENT_PTR(tx_start_adl),
5304 EVENT_PTR(tx_abort_adl),
5305 EVENT_PTR(tx_commit_adl),
5306 EVENT_PTR(tx_capacity_read_adl),
5307 EVENT_PTR(tx_capacity_write_adl),
5308 EVENT_PTR(tx_conflict_adl),
5309 EVENT_PTR(cycles_t_adl),
5310 EVENT_PTR(cycles_ct_adl),
5314 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
5315 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
5316 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
5317 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
5318 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
5320 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
5321 FORMAT_HYBRID_PTR(in_tx),
5322 FORMAT_HYBRID_PTR(in_tx_cp),
5323 FORMAT_HYBRID_PTR(offcore_rsp),
5324 FORMAT_HYBRID_PTR(ldlat),
5325 FORMAT_HYBRID_PTR(frontend),
5329 static struct attribute *adl_hybrid_extra_attr[] = {
5330 FORMAT_HYBRID_PTR(offcore_rsp),
5331 FORMAT_HYBRID_PTR(ldlat),
5332 FORMAT_HYBRID_PTR(frontend),
5336 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
5338 struct device *dev = kobj_to_dev(kobj);
5339 struct x86_hybrid_pmu *pmu =
5340 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5341 struct perf_pmu_events_hybrid_attr *pmu_attr =
5342 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
5344 return pmu->cpu_type & pmu_attr->pmu_type;
5347 static umode_t hybrid_events_is_visible(struct kobject *kobj,
5348 struct attribute *attr, int i)
5350 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
5353 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
5355 int cpu = cpumask_first(&pmu->supported_cpus);
5357 return (cpu >= nr_cpu_ids) ? -1 : cpu;
5360 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
5361 struct attribute *attr, int i)
5363 struct device *dev = kobj_to_dev(kobj);
5364 struct x86_hybrid_pmu *pmu =
5365 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5366 int cpu = hybrid_find_supported_cpu(pmu);
5368 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
5371 static umode_t hybrid_format_is_visible(struct kobject *kobj,
5372 struct attribute *attr, int i)
5374 struct device *dev = kobj_to_dev(kobj);
5375 struct x86_hybrid_pmu *pmu =
5376 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5377 struct perf_pmu_format_hybrid_attr *pmu_attr =
5378 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
5379 int cpu = hybrid_find_supported_cpu(pmu);
5381 return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
5384 static struct attribute_group hybrid_group_events_td = {
5386 .is_visible = hybrid_events_is_visible,
5389 static struct attribute_group hybrid_group_events_mem = {
5391 .is_visible = hybrid_events_is_visible,
5394 static struct attribute_group hybrid_group_events_tsx = {
5396 .is_visible = hybrid_tsx_is_visible,
5399 static struct attribute_group hybrid_group_format_extra = {
5401 .is_visible = hybrid_format_is_visible,
5404 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
5405 struct device_attribute *attr,
5408 struct x86_hybrid_pmu *pmu =
5409 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5411 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
5414 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
5415 static struct attribute *intel_hybrid_cpus_attrs[] = {
5416 &dev_attr_cpus.attr,
5420 static struct attribute_group hybrid_group_cpus = {
5421 .attrs = intel_hybrid_cpus_attrs,
5424 static const struct attribute_group *hybrid_attr_update[] = {
5425 &hybrid_group_events_td,
5426 &hybrid_group_events_mem,
5427 &hybrid_group_events_tsx,
5430 &hybrid_group_format_extra,
5436 static struct attribute *empty_attrs;
5438 static void intel_pmu_check_num_counters(int *num_counters,
5439 int *num_counters_fixed,
5440 u64 *intel_ctrl, u64 fixed_mask)
5442 if (*num_counters > INTEL_PMC_MAX_GENERIC) {
5443 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5444 *num_counters, INTEL_PMC_MAX_GENERIC);
5445 *num_counters = INTEL_PMC_MAX_GENERIC;
5447 *intel_ctrl = (1ULL << *num_counters) - 1;
5449 if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5450 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5451 *num_counters_fixed, INTEL_PMC_MAX_FIXED);
5452 *num_counters_fixed = INTEL_PMC_MAX_FIXED;
5455 *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
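/*
 * Illustrative example: with 8 generic counters and fixed_mask == 0x7 the
 * resulting intel_ctrl is 0xff | (0x7ULL << 32), matching the
 * IA32_PERF_GLOBAL_CTRL layout where the fixed-counter enable bits start at
 * bit 32 (INTEL_PMC_IDX_FIXED).
 */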
5458 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5460 int num_counters_fixed,
5463 struct event_constraint *c;
5465 if (!event_constraints)
5469 * event on fixed counter2 (REF_CYCLES) only works on this
5470 * counter, so do not extend mask to generic counters
5472 for_each_event_constraint(c, event_constraints) {
5474 * Don't extend the topdown slots and metrics
5475 * events to the generic counters.
5477 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
5479 * Disable topdown slots and metrics events,
5480 * if slots event is not in CPUID.
5482 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
5484 c->weight = hweight64(c->idxmsk64);
5488 if (c->cmask == FIXED_EVENT_FLAGS) {
5489 /* Disable fixed counters which are not enumerated in CPUID */
5490 c->idxmsk64 &= intel_ctrl;
5492 if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
5493 c->idxmsk64 |= (1ULL << num_counters) - 1;
5496 ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
5497 c->weight = hweight64(c->idxmsk64);
5501 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
5503 struct extra_reg *er;
5506 * Accessing an extra MSR may cause #GP under certain circumstances.
5507 * E.g. KVM doesn't support the offcore event.
5508 * Check all extra_regs here.
5513 for (er = extra_regs; er->msr; er++) {
5514 er->extra_msr_access = check_msr(er->msr, 0x11UL);
5515 /* Disable LBR select mapping */
5516 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5517 x86_pmu.lbr_sel_map = NULL;
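/*
 * E.g. on a guest where the test write to MSR_OFFCORE_RSP_0 does not stick,
 * extra_msr_access is cleared and events that need that register are later
 * rejected during event setup; if the LBR select MSR fails the same test,
 * LBR filtering is disabled as well.
 */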
5521 static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
5523 struct x86_hybrid_pmu *pmu;
5526 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5527 pmu = &x86_pmu.hybrid_pmu[i];
5529 intel_pmu_check_num_counters(&pmu->num_counters,
5530 &pmu->num_counters_fixed,
5534 if (pmu->intel_cap.perf_metrics) {
5535 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5536 pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
5539 if (pmu->intel_cap.pebs_output_pt_available)
5540 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
5542 intel_pmu_check_event_constraints(pmu->event_constraints,
5544 pmu->num_counters_fixed,
5547 intel_pmu_check_extra_regs(pmu->extra_regs);
5551 __init int intel_pmu_init(void)
5553 struct attribute **extra_skl_attr = &empty_attrs;
5554 struct attribute **extra_attr = &empty_attrs;
5555 struct attribute **td_attr = &empty_attrs;
5556 struct attribute **mem_attr = &empty_attrs;
5557 struct attribute **tsx_attr = &empty_attrs;
5558 union cpuid10_edx edx;
5559 union cpuid10_eax eax;
5560 union cpuid10_ebx ebx;
5561 unsigned int fixed_mask;
5565 struct x86_hybrid_pmu *pmu;
5567 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
5568 switch (boot_cpu_data.x86) {
5570 return p6_pmu_init();
5572 return knc_pmu_init();
5574 return p4_pmu_init();
5580 * Check whether the Architectural PerfMon supports
5581 * Branch Misses Retired hw_event or not.
5583 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
5584 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
5587 version = eax.split.version_id;
5591 x86_pmu = intel_pmu;
5593 x86_pmu.version = version;
5594 x86_pmu.num_counters = eax.split.num_counters;
5595 x86_pmu.cntval_bits = eax.split.bit_width;
5596 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
5598 x86_pmu.events_maskl = ebx.full;
5599 x86_pmu.events_mask_len = eax.split.mask_length;
5601 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
5604 * Quirk: v2 perfmon does not report fixed-purpose events, so
5605 * assume at least 3 events, when not running in a hypervisor:
5607 if (version > 1 && version < 5) {
5608 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
5610 x86_pmu.num_counters_fixed =
5611 max((int)edx.split.num_counters_fixed, assume);
5613 fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
5614 } else if (version >= 5)
5615 x86_pmu.num_counters_fixed = fls(fixed_mask);
5617 if (boot_cpu_has(X86_FEATURE_PDCM)) {
5620 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
5621 x86_pmu.intel_cap.capabilities = capabilities;
5624 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
5625 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
5626 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
5629 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
5630 intel_pmu_arch_lbr_init();
5634 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
5637 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
5638 if (x86_pmu.intel_cap.anythread_deprecated)
5639 pr_cont(" AnyThread deprecated, ");
5643 * Install the hw-cache-events table:
5645 switch (boot_cpu_data.x86_model) {
5646 case INTEL_FAM6_CORE_YONAH:
5647 pr_cont("Core events, ");
5651 case INTEL_FAM6_CORE2_MEROM:
5652 x86_add_quirk(intel_clovertown_quirk);
5655 case INTEL_FAM6_CORE2_MEROM_L:
5656 case INTEL_FAM6_CORE2_PENRYN:
5657 case INTEL_FAM6_CORE2_DUNNINGTON:
5658 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
5659 sizeof(hw_cache_event_ids));
5661 intel_pmu_lbr_init_core();
5663 x86_pmu.event_constraints = intel_core2_event_constraints;
5664 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
5665 pr_cont("Core2 events, ");
5669 case INTEL_FAM6_NEHALEM:
5670 case INTEL_FAM6_NEHALEM_EP:
5671 case INTEL_FAM6_NEHALEM_EX:
5672 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
5673 sizeof(hw_cache_event_ids));
5674 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5675 sizeof(hw_cache_extra_regs));
5677 intel_pmu_lbr_init_nhm();
5679 x86_pmu.event_constraints = intel_nehalem_event_constraints;
5680 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
5681 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5682 x86_pmu.extra_regs = intel_nehalem_extra_regs;
5683 x86_pmu.limit_period = nhm_limit_period;
5685 mem_attr = nhm_mem_events_attrs;
5687 /* UOPS_ISSUED.STALLED_CYCLES */
5688 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5689 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5690 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5691 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5692 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
5694 intel_pmu_pebs_data_source_nhm();
5695 x86_add_quirk(intel_nehalem_quirk);
5696 x86_pmu.pebs_no_tlb = 1;
5697 extra_attr = nhm_format_attr;
5699 pr_cont("Nehalem events, ");
5703 case INTEL_FAM6_ATOM_BONNELL:
5704 case INTEL_FAM6_ATOM_BONNELL_MID:
5705 case INTEL_FAM6_ATOM_SALTWELL:
5706 case INTEL_FAM6_ATOM_SALTWELL_MID:
5707 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
5708 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
5709 sizeof(hw_cache_event_ids));
5711 intel_pmu_lbr_init_atom();
5713 x86_pmu.event_constraints = intel_gen_event_constraints;
5714 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
5715 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
5716 pr_cont("Atom events, ");
5720 case INTEL_FAM6_ATOM_SILVERMONT:
5721 case INTEL_FAM6_ATOM_SILVERMONT_D:
5722 case INTEL_FAM6_ATOM_SILVERMONT_MID:
5723 case INTEL_FAM6_ATOM_AIRMONT:
5724 case INTEL_FAM6_ATOM_AIRMONT_MID:
5725 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
5726 sizeof(hw_cache_event_ids));
5727 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
5728 sizeof(hw_cache_extra_regs));
5730 intel_pmu_lbr_init_slm();
5732 x86_pmu.event_constraints = intel_slm_event_constraints;
5733 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5734 x86_pmu.extra_regs = intel_slm_extra_regs;
5735 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5736 td_attr = slm_events_attrs;
5737 extra_attr = slm_format_attr;
5738 pr_cont("Silvermont events, ");
5739 name = "silvermont";
5742 case INTEL_FAM6_ATOM_GOLDMONT:
5743 case INTEL_FAM6_ATOM_GOLDMONT_D:
5744 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
5745 sizeof(hw_cache_event_ids));
5746 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
5747 sizeof(hw_cache_extra_regs));
5749 intel_pmu_lbr_init_skl();
5751 x86_pmu.event_constraints = intel_slm_event_constraints;
5752 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
5753 x86_pmu.extra_regs = intel_glm_extra_regs;
5755 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5756 * for precise cycles.
5757 * :pp is identical to :ppp
5759 x86_pmu.pebs_aliases = NULL;
5760 x86_pmu.pebs_prec_dist = true;
5761 x86_pmu.lbr_pt_coexist = true;
5762 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5763 td_attr = glm_events_attrs;
5764 extra_attr = slm_format_attr;
5765 pr_cont("Goldmont events, ");
5769 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
5770 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5771 sizeof(hw_cache_event_ids));
5772 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
5773 sizeof(hw_cache_extra_regs));
5775 intel_pmu_lbr_init_skl();
5777 x86_pmu.event_constraints = intel_slm_event_constraints;
5778 x86_pmu.extra_regs = intel_glm_extra_regs;
5780 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5781 * for precise cycles.
5783 x86_pmu.pebs_aliases = NULL;
5784 x86_pmu.pebs_prec_dist = true;
5785 x86_pmu.lbr_pt_coexist = true;
5786 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5787 x86_pmu.flags |= PMU_FL_PEBS_ALL;
5788 x86_pmu.get_event_constraints = glp_get_event_constraints;
5789 td_attr = glm_events_attrs;
5790 /* Goldmont Plus has 4-wide pipeline */
5791 event_attr_td_total_slots_scale_glm.event_str = "4";
5792 extra_attr = slm_format_attr;
5793 pr_cont("Goldmont plus events, ");
5794 name = "goldmont_plus";
5797 case INTEL_FAM6_ATOM_TREMONT_D:
5798 case INTEL_FAM6_ATOM_TREMONT:
5799 case INTEL_FAM6_ATOM_TREMONT_L:
5800 x86_pmu.late_ack = true;
5801 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5802 sizeof(hw_cache_event_ids));
5803 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
5804 sizeof(hw_cache_extra_regs));
5805 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
5807 intel_pmu_lbr_init_skl();
5809 x86_pmu.event_constraints = intel_slm_event_constraints;
5810 x86_pmu.extra_regs = intel_tnt_extra_regs;
5812 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5813 * for precise cycles.
5815 x86_pmu.pebs_aliases = NULL;
5816 x86_pmu.pebs_prec_dist = true;
5817 x86_pmu.lbr_pt_coexist = true;
5818 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5819 x86_pmu.get_event_constraints = tnt_get_event_constraints;
5820 td_attr = tnt_events_attrs;
5821 extra_attr = slm_format_attr;
5822 pr_cont("Tremont events, ");
5826 case INTEL_FAM6_WESTMERE:
5827 case INTEL_FAM6_WESTMERE_EP:
5828 case INTEL_FAM6_WESTMERE_EX:
5829 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
5830 sizeof(hw_cache_event_ids));
5831 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5832 sizeof(hw_cache_extra_regs));
5834 intel_pmu_lbr_init_nhm();
5836 x86_pmu.event_constraints = intel_westmere_event_constraints;
5837 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5838 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
5839 x86_pmu.extra_regs = intel_westmere_extra_regs;
5840 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5842 mem_attr = nhm_mem_events_attrs;
5844 /* UOPS_ISSUED.STALLED_CYCLES */
5845 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5846 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5847 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5848 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5849 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
5851 intel_pmu_pebs_data_source_nhm();
5852 extra_attr = nhm_format_attr;
5853 pr_cont("Westmere events, ");
5857 case INTEL_FAM6_SANDYBRIDGE:
5858 case INTEL_FAM6_SANDYBRIDGE_X:
5859 x86_add_quirk(intel_sandybridge_quirk);
5860 x86_add_quirk(intel_ht_bug);
5861 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
5862 sizeof(hw_cache_event_ids));
5863 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
5864 sizeof(hw_cache_extra_regs));
5866 intel_pmu_lbr_init_snb();
5868 x86_pmu.event_constraints = intel_snb_event_constraints;
5869 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
5870 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
5871 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
5872 x86_pmu.extra_regs = intel_snbep_extra_regs;
5874 x86_pmu.extra_regs = intel_snb_extra_regs;
5877 /* all extra regs are per-cpu when HT is on */
5878 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5879 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5881 td_attr = snb_events_attrs;
5882 mem_attr = snb_mem_events_attrs;
5884 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
5885 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5886 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5887 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
5888 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5889 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
5891 extra_attr = nhm_format_attr;
5893 pr_cont("SandyBridge events, ");
5894 name = "sandybridge";
5897 case INTEL_FAM6_IVYBRIDGE:
5898 case INTEL_FAM6_IVYBRIDGE_X:
5899 x86_add_quirk(intel_ht_bug);
5900 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
5901 sizeof(hw_cache_event_ids));
5902 /* dTLB-load-misses on IVB is different than SNB */
5903 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
5905 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
5906 sizeof(hw_cache_extra_regs));
5908 intel_pmu_lbr_init_snb();
5910 x86_pmu.event_constraints = intel_ivb_event_constraints;
5911 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
5912 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5913 x86_pmu.pebs_prec_dist = true;
5914 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
5915 x86_pmu.extra_regs = intel_snbep_extra_regs;
5917 x86_pmu.extra_regs = intel_snb_extra_regs;
5918 /* all extra regs are per-cpu when HT is on */
5919 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5920 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5922 td_attr = snb_events_attrs;
5923 mem_attr = snb_mem_events_attrs;
5925 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
5926 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5927 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5929 extra_attr = nhm_format_attr;
5931 pr_cont("IvyBridge events, ");
5936 case INTEL_FAM6_HASWELL:
5937 case INTEL_FAM6_HASWELL_X:
5938 case INTEL_FAM6_HASWELL_L:
5939 case INTEL_FAM6_HASWELL_G:
5940 x86_add_quirk(intel_ht_bug);
5941 x86_add_quirk(intel_pebs_isolation_quirk);
5942 x86_pmu.late_ack = true;
5943 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5944 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5946 intel_pmu_lbr_init_hsw();
5948 x86_pmu.event_constraints = intel_hsw_event_constraints;
5949 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
5950 x86_pmu.extra_regs = intel_snbep_extra_regs;
5951 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5952 x86_pmu.pebs_prec_dist = true;
5953 /* all extra regs are per-cpu when HT is on */
5954 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5955 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5957 x86_pmu.hw_config = hsw_hw_config;
5958 x86_pmu.get_event_constraints = hsw_get_event_constraints;
5959 x86_pmu.lbr_double_abort = true;
5960 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5961 hsw_format_attr : nhm_format_attr;
5962 td_attr = hsw_events_attrs;
5963 mem_attr = hsw_mem_events_attrs;
5964 tsx_attr = hsw_tsx_events_attrs;
5965 pr_cont("Haswell events, ");
5969 case INTEL_FAM6_BROADWELL:
5970 case INTEL_FAM6_BROADWELL_D:
5971 case INTEL_FAM6_BROADWELL_G:
5972 case INTEL_FAM6_BROADWELL_X:
5973 x86_add_quirk(intel_pebs_isolation_quirk);
5974 x86_pmu.late_ack = true;
5975 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5976 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5978 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
5979 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
5980 BDW_L3_MISS|HSW_SNOOP_DRAM;
5981 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
5983 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
5984 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5985 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
5986 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5988 intel_pmu_lbr_init_hsw();
5990 x86_pmu.event_constraints = intel_bdw_event_constraints;
5991 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
5992 x86_pmu.extra_regs = intel_snbep_extra_regs;
5993 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5994 x86_pmu.pebs_prec_dist = true;
5995 /* all extra regs are per-cpu when HT is on */
5996 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5997 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5999 x86_pmu.hw_config = hsw_hw_config;
6000 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6001 x86_pmu.limit_period = bdw_limit_period;
6002 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6003 hsw_format_attr : nhm_format_attr;
6004 td_attr = hsw_events_attrs;
6005 mem_attr = hsw_mem_events_attrs;
6006 tsx_attr = hsw_tsx_events_attrs;
6007 pr_cont("Broadwell events, ");
6011 case INTEL_FAM6_XEON_PHI_KNL:
6012 case INTEL_FAM6_XEON_PHI_KNM:
6013 memcpy(hw_cache_event_ids,
6014 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6015 memcpy(hw_cache_extra_regs,
6016 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6017 intel_pmu_lbr_init_knl();
6019 x86_pmu.event_constraints = intel_slm_event_constraints;
6020 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6021 x86_pmu.extra_regs = intel_knl_extra_regs;
6023 /* all extra regs are per-cpu when HT is on */
6024 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6025 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6026 extra_attr = slm_format_attr;
6027 pr_cont("Knights Landing/Mill events, ");
6028 name = "knights-landing";
6031 case INTEL_FAM6_SKYLAKE_X:
6034 case INTEL_FAM6_SKYLAKE_L:
6035 case INTEL_FAM6_SKYLAKE:
6036 case INTEL_FAM6_KABYLAKE_L:
6037 case INTEL_FAM6_KABYLAKE:
6038 case INTEL_FAM6_COMETLAKE_L:
6039 case INTEL_FAM6_COMETLAKE:
6040 x86_add_quirk(intel_pebs_isolation_quirk);
6041 x86_pmu.late_ack = true;
6042 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6043 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6044 intel_pmu_lbr_init_skl();
6046 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
6047 event_attr_td_recovery_bubbles.event_str_noht =
6048 "event=0xd,umask=0x1,cmask=1";
6049 event_attr_td_recovery_bubbles.event_str_ht =
6050 "event=0xd,umask=0x1,cmask=1,any=1";
6052 x86_pmu.event_constraints = intel_skl_event_constraints;
6053 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
6054 x86_pmu.extra_regs = intel_skl_extra_regs;
6055 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
6056 x86_pmu.pebs_prec_dist = true;
6057 /* all extra regs are per-cpu when HT is on */
6058 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6059 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6061 x86_pmu.hw_config = hsw_hw_config;
6062 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6063 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6064 hsw_format_attr : nhm_format_attr;
6065 extra_skl_attr = skl_format_attr;
6066 td_attr = hsw_events_attrs;
6067 mem_attr = hsw_mem_events_attrs;
6068 tsx_attr = hsw_tsx_events_attrs;
6069 intel_pmu_pebs_data_source_skl(pmem);
6072 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
6073 * TSX force abort hooks are not required on these systems. Only deploy the
6074 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
6076 if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
6077 !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
6078 x86_pmu.flags |= PMU_FL_TFA;
6079 x86_pmu.get_event_constraints = tfa_get_event_constraints;
6080 x86_pmu.enable_all = intel_tfa_pmu_enable_all;
6081 x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
6084 pr_cont("Skylake events, ");
6088 case INTEL_FAM6_ICELAKE_X:
6089 case INTEL_FAM6_ICELAKE_D:
6092 case INTEL_FAM6_ICELAKE_L:
6093 case INTEL_FAM6_ICELAKE:
6094 case INTEL_FAM6_TIGERLAKE_L:
6095 case INTEL_FAM6_TIGERLAKE:
6096 case INTEL_FAM6_ROCKETLAKE:
6097 x86_pmu.late_ack = true;
6098 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6099 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6100 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6101 intel_pmu_lbr_init_skl();
6103 x86_pmu.event_constraints = intel_icl_event_constraints;
6104 x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
6105 x86_pmu.extra_regs = intel_icl_extra_regs;
6106 x86_pmu.pebs_aliases = NULL;
6107 x86_pmu.pebs_prec_dist = true;
6108 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6109 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6111 x86_pmu.hw_config = hsw_hw_config;
6112 x86_pmu.get_event_constraints = icl_get_event_constraints;
6113 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6114 hsw_format_attr : nhm_format_attr;
6115 extra_skl_attr = skl_format_attr;
6116 mem_attr = icl_events_attrs;
6117 td_attr = icl_td_events_attrs;
6118 tsx_attr = icl_tsx_events_attrs;
6119 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6120 x86_pmu.lbr_pt_coexist = true;
6121 intel_pmu_pebs_data_source_skl(pmem);
6122 x86_pmu.num_topdown_events = 4;
6123 x86_pmu.update_topdown_event = icl_update_topdown_event;
6124 x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
6125 pr_cont("Icelake events, ");
6129 case INTEL_FAM6_SAPPHIRERAPIDS_X:
6131 x86_pmu.late_ack = true;
6132 memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6133 memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6135 x86_pmu.event_constraints = intel_spr_event_constraints;
6136 x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints;
6137 x86_pmu.extra_regs = intel_spr_extra_regs;
6138 x86_pmu.limit_period = spr_limit_period;
6139 x86_pmu.pebs_aliases = NULL;
6140 x86_pmu.pebs_prec_dist = true;
6141 x86_pmu.pebs_block = true;
6142 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6143 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6144 x86_pmu.flags |= PMU_FL_PEBS_ALL;
6145 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6146 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6148 x86_pmu.hw_config = hsw_hw_config;
6149 x86_pmu.get_event_constraints = spr_get_event_constraints;
6150 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6151 hsw_format_attr : nhm_format_attr;
6152 extra_skl_attr = skl_format_attr;
6153 mem_attr = spr_events_attrs;
6154 td_attr = spr_td_events_attrs;
6155 tsx_attr = spr_tsx_events_attrs;
6156 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6157 x86_pmu.lbr_pt_coexist = true;
6158 intel_pmu_pebs_data_source_skl(pmem);
6159 x86_pmu.num_topdown_events = 8;
6160 x86_pmu.update_topdown_event = icl_update_topdown_event;
6161 x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
6162 pr_cont("Sapphire Rapids events, ");
6163 name = "sapphire_rapids";
6166 case INTEL_FAM6_ALDERLAKE:
6167 case INTEL_FAM6_ALDERLAKE_L:
6169 * Alder Lake has two types of CPU: core and atom.
6171 * Initialize the common PerfMon capabilities here.
6173 x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
6174 sizeof(struct x86_hybrid_pmu),
6176 if (!x86_pmu.hybrid_pmu)
6178 static_branch_enable(&perf_is_hybrid);
6179 x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
6181 x86_pmu.pebs_aliases = NULL;
6182 x86_pmu.pebs_prec_dist = true;
6183 x86_pmu.pebs_block = true;
6184 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6185 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6186 x86_pmu.flags |= PMU_FL_PEBS_ALL;
6187 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6188 x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6189 x86_pmu.lbr_pt_coexist = true;
6190 intel_pmu_pebs_data_source_skl(false);
6191 x86_pmu.num_topdown_events = 8;
6192 x86_pmu.update_topdown_event = adl_update_topdown_event;
6193 x86_pmu.set_topdown_event_period = adl_set_topdown_event_period;
6195 x86_pmu.filter_match = intel_pmu_filter_match;
6196 x86_pmu.get_event_constraints = adl_get_event_constraints;
6197 x86_pmu.hw_config = adl_hw_config;
6198 x86_pmu.limit_period = spr_limit_period;
6199 x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
6201 * The rtm_abort_event is used to check whether to enable GPRs
6202 * for the RTM abort event. Atom doesn't have the RTM abort
6203 * event. There is no harm in setting it in the common
6204 * x86_pmu.rtm_abort_event.
6206 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		pmu->name = "cpu_core";
		pmu->cpu_type = hybrid_big;
		pmu->late_ack = true;
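		/*
		 * With the hybrid CPUID feature set, the enumerated counter
		 * numbers reflect the set common to both core types; the big
		 * core actually has two more general-purpose counters and one
		 * more fixed counter.
		 */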
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->num_counters = x86_pmu.num_counters + 2;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
		} else {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
		pmu->unconstrained = (struct event_constraint)
					__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							   0, pmu->num_counters, 0, 0);
		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
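		/* The big core supports PERF_METRICS but not PEBS output to Intel PT. */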
		pmu->intel_cap.perf_metrics = 1;
		pmu->intel_cap.pebs_output_pt_available = 0;

		memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
		memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
		pmu->event_constraints = intel_spr_event_constraints;
		pmu->pebs_constraints = intel_spr_pebs_event_constraints;
		pmu->extra_regs = intel_spr_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		pmu->name = "cpu_atom";
		pmu->cpu_type = hybrid_small;
		pmu->mid_ack = true;
		pmu->num_counters = x86_pmu.num_counters;
		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		pmu->max_pebs_events = x86_pmu.max_pebs_events;
		pmu->unconstrained = (struct event_constraint)
					__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							   0, pmu->num_counters, 0, 0);
		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
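		/* The Atom core has no PERF_METRICS but can emit PEBS records via Intel PT. */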
		pmu->intel_cap.perf_metrics = 0;
		pmu->intel_cap.pebs_output_pt_available = 1;

		memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
		memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
		pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		pmu->event_constraints = intel_slm_event_constraints;
		pmu->pebs_constraints = intel_grt_pebs_event_constraints;
		pmu->extra_regs = intel_grt_extra_regs;
		pr_cont("Alderlake Hybrid events, ");
		name = "alderlake_hybrid";
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		default:
			/* default constraints for v2 and up */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		}
		break;
	}
	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}

	intel_pmu_check_num_counters(&x86_pmu.num_counters,
				     &x86_pmu.num_counters_fixed,
				     &x86_pmu.intel_ctrl,
				     (u64)fixed_mask);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
					  x86_pmu.num_counters,
					  x86_pmu.num_counters_fixed,
					  x86_pmu.intel_ctrl);

	/*
	 * Accessing the LBR MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support LBR MSRs.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR can not be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr) {
		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
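		/*
		 * The disable_all comparison below effectively checks that the
		 * v2+ intel_pmu callbacks are in use; the snapshot helpers
		 * rely on the global-control based disable/enable path.
		 */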
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
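	/*
	 * Writes through the legacy counter MSRs are sign-extended from bit 31.
	 * When IA32_PERF_CAPABILITIES advertises full-width writes, the
	 * IA32_PMCx alias MSRs accept the full counter width, so the maximum
	 * period can be raised to half the counter range.
	 */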
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

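	/*
	 * For a non-hybrid PMU with PERF_METRICS, set the dedicated enable bit
	 * in GLOBAL_CTRL so the metrics can be activated; the hybrid case is
	 * handled per CPU type when the hybrid PMUs are checked below.
	 */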
	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

	if (is_hybrid())
		intel_pmu_check_hybrid_pmus((u64)fixed_mask);

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled.
 * If HT is off, then we disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/* problem not present on this CPU model, nothing to do */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}
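
	/*
	 * HT is off: tear down the exclusive-counter workaround. Stop the
	 * perf-based hardlockup detector while the scheduling callbacks are
	 * removed, restart it, then free the per-CPU exclusive counter state.
	 */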
	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}

subsys_initcall(fixup_ht_bug)