1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
17 #include <linux/kvm_host.h>
19 #include <asm/cpufeature.h>
20 #include <asm/hardirq.h>
21 #include <asm/intel-family.h>
22 #include <asm/intel_pt.h>
24 #include <asm/cpu_device_id.h>
26 #include "../perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
31 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
33 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
34 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
35 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
36 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
37 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
38 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
39 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
40 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
43 static struct event_constraint intel_core_event_constraints[] __read_mostly =
45 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
46 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
47 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
48 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
49 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
50 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
54 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
56 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
57 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
58 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
59 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
60 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
61 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
62 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
63 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
64 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
65 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
66 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
67 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
68 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
72 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
74 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
75 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
76 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
77 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
78 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
79 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
80 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
81 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
82 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
83 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
84 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
88 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
90 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
91 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
92 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
96 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
98 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
99 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
100 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
101 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
102 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
103 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
104 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
108 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
110 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
111 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
112 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
113 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
114 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
116 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
117 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
118 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
119 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
120 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
121 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
127 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
128 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
130 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
135 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
137 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
138 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
139 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
140 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
141 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
142 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
143 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
144 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
145 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
146 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
149 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
155 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
156 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
158 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
163 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
165 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
166 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
167 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
168 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
172 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
177 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
179 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
180 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
181 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
185 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
187 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
188 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
189 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
190 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
191 FIXED_EVENT_CONSTRAINT(0x0500, 4),
192 FIXED_EVENT_CONSTRAINT(0x0600, 5),
193 FIXED_EVENT_CONSTRAINT(0x0700, 6),
194 FIXED_EVENT_CONSTRAINT(0x0800, 7),
195 FIXED_EVENT_CONSTRAINT(0x0900, 8),
196 FIXED_EVENT_CONSTRAINT(0x0a00, 9),
197 FIXED_EVENT_CONSTRAINT(0x0b00, 10),
198 FIXED_EVENT_CONSTRAINT(0x0c00, 11),
199 FIXED_EVENT_CONSTRAINT(0x0d00, 12),
200 FIXED_EVENT_CONSTRAINT(0x0e00, 13),
201 FIXED_EVENT_CONSTRAINT(0x0f00, 14),
202 FIXED_EVENT_CONSTRAINT(0x1000, 15),
206 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
208 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
209 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
210 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
214 static struct event_constraint intel_skl_event_constraints[] = {
215 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
216 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
217 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
218 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/* When HT is off, these can only run on the bottom 4 counters */
223 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
224 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
225 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
226 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
227 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
232 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
233 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
234 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
238 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
239 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
240 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
241 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
242 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
246 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
247 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
248 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
249 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
250 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
254 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
255 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
256 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
257 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note: the low 8 bits of the eventsel code are not a contiguous field;
	 * they contain some bits that #GP when set. These are masked out.
	 */
262 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
266 static struct event_constraint intel_icl_event_constraints[] = {
267 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
268 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
269 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
270 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
271 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
272 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
273 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
274 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
275 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
276 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
277 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
278 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
279 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
280 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
281 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
282 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
283 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
284 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
285 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
286 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
287 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
288 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
289 INTEL_EVENT_CONSTRAINT(0xef, 0xf),
290 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
294 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
295 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
296 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
297 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
298 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
302 static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
303 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
304 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
305 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
306 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
307 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
308 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
312 static struct event_constraint intel_spr_event_constraints[] = {
313 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
314 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
315 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
316 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
317 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
318 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
319 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
320 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
321 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
322 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
323 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
324 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
325 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
327 INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
328 INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally, event codes < 0x90 are restricted to counters 0-3.
	 * 0x2E and 0x3C are exceptions and have no restriction.
	 */
333 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
335 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
336 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
337 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
338 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
339 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
340 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
341 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
342 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally, event codes >= 0x90 have no restrictions.
	 * The exceptions are defined above.
	 */
347 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
353 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
354 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
355 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
357 static struct attribute *nhm_mem_events_attrs[] = {
358 EVENT_PTR(mem_ld_nhm),
/*
 * Topdown events for Intel Core CPUs.
 *
 * The events are all counted in slots, where a slot is a free issue slot
 * in a 4-wide pipeline. Some events are already reported in slots; for
 * cycle-based events we multiply by the pipeline width (4).
 *
 * With Hyper-Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the averaged case the metric is still scaled to the pipeline width,
 * so we use a scale factor of 2: (count_t0 + count_t1) / 2 * 4.
 */
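/*
 * Illustrative sketch only (not part of the driver): how the scale factors
 * described above combine with the raw counts into a topdown fraction. The
 * helper name and the per-mille formula are hypothetical; div64_u64() is the
 * regular kernel 64-bit division helper.
 */
#if 0
static unsigned int td_fetch_bound_permille(u64 fetch_bubbles,
					    u64 unhalted_cycles, bool ht_on)
{
	/* slots = unhalted cycles * scale (4, or 2 for the per-core sum with HT) */
	u64 total_slots = unhalted_cycles * (ht_on ? 2 : 4);

	return total_slots ? div64_u64(fetch_bubbles * 1000, total_slots) : 0;
}
#endif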
376 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
377 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
378 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
379 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
380 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
381 "event=0xe,umask=0x1"); /* uops_issued.any */
382 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
383 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
384 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
385 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
386 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
387 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
388 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
389 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
392 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
393 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
394 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
395 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
396 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
397 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
398 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
399 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
400 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
402 static struct attribute *snb_events_attrs[] = {
403 EVENT_PTR(td_slots_issued),
404 EVENT_PTR(td_slots_retired),
405 EVENT_PTR(td_fetch_bubbles),
406 EVENT_PTR(td_total_slots),
407 EVENT_PTR(td_total_slots_scale),
408 EVENT_PTR(td_recovery_bubbles),
409 EVENT_PTR(td_recovery_bubbles_scale),
413 static struct attribute *snb_mem_events_attrs[] = {
414 EVENT_PTR(mem_ld_snb),
415 EVENT_PTR(mem_st_snb),
419 static struct event_constraint intel_hsw_event_constraints[] = {
420 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
421 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
422 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
423 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
424 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
425 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
426 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
427 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
428 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
429 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
430 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
431 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 * When HT is on, they are impacted by the HT bug and require EXCL access.
	 */
437 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
438 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
439 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
440 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
445 static struct event_constraint intel_bdw_event_constraints[] = {
446 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
447 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
448 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
449 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
450 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/* When HT is off, these can only run on the bottom 4 counters */
454 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
455 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
456 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
457 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
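/*
 * Illustrative sketch only (not part of the driver): how a generic perf
 * event id resolves to a raw event-select/umask pair via the table above.
 * The helper name and the pr_info() call are hypothetical; only
 * intel_pmu_event_map() and PERF_COUNT_HW_CACHE_MISSES come from the
 * surrounding code.
 */
#if 0
static void intel_pmu_show_generic_map_example(void)
{
	u64 raw = intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES); /* 0x412e */

	pr_info("cache-misses -> event=0x%02llx umask=0x%02llx\n",
		raw & 0xff, (raw >> 8) & 0xff);
}
#endif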
466 static __initconst const u64 spr_hw_cache_event_ids
467 [PERF_COUNT_HW_CACHE_MAX]
468 [PERF_COUNT_HW_CACHE_OP_MAX]
469 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
473 [ C(RESULT_ACCESS) ] = 0x81d0,
474 [ C(RESULT_MISS) ] = 0xe124,
477 [ C(RESULT_ACCESS) ] = 0x82d0,
482 [ C(RESULT_MISS) ] = 0xe424,
485 [ C(RESULT_ACCESS) ] = -1,
486 [ C(RESULT_MISS) ] = -1,
491 [ C(RESULT_ACCESS) ] = 0x12a,
492 [ C(RESULT_MISS) ] = 0x12a,
495 [ C(RESULT_ACCESS) ] = 0x12a,
496 [ C(RESULT_MISS) ] = 0x12a,
501 [ C(RESULT_ACCESS) ] = 0x81d0,
502 [ C(RESULT_MISS) ] = 0xe12,
505 [ C(RESULT_ACCESS) ] = 0x82d0,
506 [ C(RESULT_MISS) ] = 0xe13,
511 [ C(RESULT_ACCESS) ] = -1,
512 [ C(RESULT_MISS) ] = 0xe11,
515 [ C(RESULT_ACCESS) ] = -1,
516 [ C(RESULT_MISS) ] = -1,
518 [ C(OP_PREFETCH) ] = {
519 [ C(RESULT_ACCESS) ] = -1,
520 [ C(RESULT_MISS) ] = -1,
525 [ C(RESULT_ACCESS) ] = 0x4c4,
526 [ C(RESULT_MISS) ] = 0x4c5,
529 [ C(RESULT_ACCESS) ] = -1,
530 [ C(RESULT_MISS) ] = -1,
532 [ C(OP_PREFETCH) ] = {
533 [ C(RESULT_ACCESS) ] = -1,
534 [ C(RESULT_MISS) ] = -1,
539 [ C(RESULT_ACCESS) ] = 0x12a,
540 [ C(RESULT_MISS) ] = 0x12a,
545 static __initconst const u64 spr_hw_cache_extra_regs
546 [PERF_COUNT_HW_CACHE_MAX]
547 [PERF_COUNT_HW_CACHE_OP_MAX]
548 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
552 [ C(RESULT_ACCESS) ] = 0x10001,
553 [ C(RESULT_MISS) ] = 0x3fbfc00001,
556 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
557 [ C(RESULT_MISS) ] = 0x3f3fc00002,
562 [ C(RESULT_ACCESS) ] = 0x10c000001,
563 [ C(RESULT_MISS) ] = 0x3fb3000001,
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
577 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
578 #define SKL_DEMAND_RFO BIT_ULL(1)
579 #define SKL_ANY_RESPONSE BIT_ULL(16)
580 #define SKL_SUPPLIER_NONE BIT_ULL(17)
581 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
582 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
583 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
584 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
585 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
586 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
587 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
588 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
589 #define SKL_SPL_HIT BIT_ULL(30)
590 #define SKL_SNOOP_NONE BIT_ULL(31)
591 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
592 #define SKL_SNOOP_MISS BIT_ULL(33)
593 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
594 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
595 #define SKL_SNOOP_HITM BIT_ULL(36)
596 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
597 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
598 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
599 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
600 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
601 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
602 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
603 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
604 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
605 SKL_SNOOP_HITM|SKL_SPL_HIT)
606 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
607 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
608 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
609 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
610 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
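/*
 * Illustrative sketch only (not part of the driver): the SKL extra_reg
 * values in the tables further below are built by OR-ing one demand group,
 * one supplier/response group and one snoop group from the bits defined
 * above, e.g. an LLC demand-read "access" qualifier:
 */
#if 0
static const u64 skl_llc_demand_read_access_example =
		SKL_DEMAND_READ | SKL_LLC_ACCESS | SKL_ANY_SNOOP;
#endif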
612 static __initconst const u64 skl_hw_cache_event_ids
613 [PERF_COUNT_HW_CACHE_MAX]
614 [PERF_COUNT_HW_CACHE_OP_MAX]
615 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
619 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
620 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
623 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
624 [ C(RESULT_MISS) ] = 0x0,
626 [ C(OP_PREFETCH) ] = {
627 [ C(RESULT_ACCESS) ] = 0x0,
628 [ C(RESULT_MISS) ] = 0x0,
633 [ C(RESULT_ACCESS) ] = 0x0,
634 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
637 [ C(RESULT_ACCESS) ] = -1,
638 [ C(RESULT_MISS) ] = -1,
640 [ C(OP_PREFETCH) ] = {
641 [ C(RESULT_ACCESS) ] = 0x0,
642 [ C(RESULT_MISS) ] = 0x0,
647 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
648 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
651 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
652 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
654 [ C(OP_PREFETCH) ] = {
655 [ C(RESULT_ACCESS) ] = 0x0,
656 [ C(RESULT_MISS) ] = 0x0,
661 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
662 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
665 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
666 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
668 [ C(OP_PREFETCH) ] = {
669 [ C(RESULT_ACCESS) ] = 0x0,
670 [ C(RESULT_MISS) ] = 0x0,
675 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
676 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
679 [ C(RESULT_ACCESS) ] = -1,
680 [ C(RESULT_MISS) ] = -1,
682 [ C(OP_PREFETCH) ] = {
683 [ C(RESULT_ACCESS) ] = -1,
684 [ C(RESULT_MISS) ] = -1,
689 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
690 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
693 [ C(RESULT_ACCESS) ] = -1,
694 [ C(RESULT_MISS) ] = -1,
696 [ C(OP_PREFETCH) ] = {
697 [ C(RESULT_ACCESS) ] = -1,
698 [ C(RESULT_MISS) ] = -1,
703 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
704 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
707 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
708 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
710 [ C(OP_PREFETCH) ] = {
711 [ C(RESULT_ACCESS) ] = 0x0,
712 [ C(RESULT_MISS) ] = 0x0,
717 static __initconst const u64 skl_hw_cache_extra_regs
718 [PERF_COUNT_HW_CACHE_MAX]
719 [PERF_COUNT_HW_CACHE_OP_MAX]
720 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
724 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
725 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
726 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
727 SKL_L3_MISS|SKL_ANY_SNOOP|
731 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
732 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
733 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
734 SKL_L3_MISS|SKL_ANY_SNOOP|
737 [ C(OP_PREFETCH) ] = {
738 [ C(RESULT_ACCESS) ] = 0x0,
739 [ C(RESULT_MISS) ] = 0x0,
744 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
745 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
746 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
747 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
750 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
751 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
752 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
753 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
755 [ C(OP_PREFETCH) ] = {
756 [ C(RESULT_ACCESS) ] = 0x0,
757 [ C(RESULT_MISS) ] = 0x0,
762 #define SNB_DMND_DATA_RD (1ULL << 0)
763 #define SNB_DMND_RFO (1ULL << 1)
764 #define SNB_DMND_IFETCH (1ULL << 2)
765 #define SNB_DMND_WB (1ULL << 3)
766 #define SNB_PF_DATA_RD (1ULL << 4)
767 #define SNB_PF_RFO (1ULL << 5)
768 #define SNB_PF_IFETCH (1ULL << 6)
769 #define SNB_LLC_DATA_RD (1ULL << 7)
770 #define SNB_LLC_RFO (1ULL << 8)
771 #define SNB_LLC_IFETCH (1ULL << 9)
772 #define SNB_BUS_LOCKS (1ULL << 10)
773 #define SNB_STRM_ST (1ULL << 11)
774 #define SNB_OTHER (1ULL << 15)
775 #define SNB_RESP_ANY (1ULL << 16)
776 #define SNB_NO_SUPP (1ULL << 17)
777 #define SNB_LLC_HITM (1ULL << 18)
778 #define SNB_LLC_HITE (1ULL << 19)
779 #define SNB_LLC_HITS (1ULL << 20)
780 #define SNB_LLC_HITF (1ULL << 21)
781 #define SNB_LOCAL (1ULL << 22)
782 #define SNB_REMOTE (0xffULL << 23)
783 #define SNB_SNP_NONE (1ULL << 31)
784 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
785 #define SNB_SNP_MISS (1ULL << 33)
786 #define SNB_NO_FWD (1ULL << 34)
787 #define SNB_SNP_FWD (1ULL << 35)
788 #define SNB_HITM (1ULL << 36)
789 #define SNB_NON_DRAM (1ULL << 37)
791 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
792 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
793 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
795 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
796 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
799 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
800 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
802 #define SNB_L3_ACCESS SNB_RESP_ANY
803 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
805 static __initconst const u64 snb_hw_cache_extra_regs
806 [PERF_COUNT_HW_CACHE_MAX]
807 [PERF_COUNT_HW_CACHE_OP_MAX]
808 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
812 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
813 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
816 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
817 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
819 [ C(OP_PREFETCH) ] = {
820 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
821 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
826 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
827 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
830 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
831 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
833 [ C(OP_PREFETCH) ] = {
834 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
835 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
840 static __initconst const u64 snb_hw_cache_event_ids
841 [PERF_COUNT_HW_CACHE_MAX]
842 [PERF_COUNT_HW_CACHE_OP_MAX]
843 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
847 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
848 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
851 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
852 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
854 [ C(OP_PREFETCH) ] = {
855 [ C(RESULT_ACCESS) ] = 0x0,
856 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
861 [ C(RESULT_ACCESS) ] = 0x0,
862 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
865 [ C(RESULT_ACCESS) ] = -1,
866 [ C(RESULT_MISS) ] = -1,
868 [ C(OP_PREFETCH) ] = {
869 [ C(RESULT_ACCESS) ] = 0x0,
870 [ C(RESULT_MISS) ] = 0x0,
875 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
876 [ C(RESULT_ACCESS) ] = 0x01b7,
877 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
878 [ C(RESULT_MISS) ] = 0x01b7,
881 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
882 [ C(RESULT_ACCESS) ] = 0x01b7,
883 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
884 [ C(RESULT_MISS) ] = 0x01b7,
886 [ C(OP_PREFETCH) ] = {
887 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
888 [ C(RESULT_ACCESS) ] = 0x01b7,
889 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
890 [ C(RESULT_MISS) ] = 0x01b7,
895 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
896 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
899 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
900 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
902 [ C(OP_PREFETCH) ] = {
903 [ C(RESULT_ACCESS) ] = 0x0,
904 [ C(RESULT_MISS) ] = 0x0,
909 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
910 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
913 [ C(RESULT_ACCESS) ] = -1,
914 [ C(RESULT_MISS) ] = -1,
916 [ C(OP_PREFETCH) ] = {
917 [ C(RESULT_ACCESS) ] = -1,
918 [ C(RESULT_MISS) ] = -1,
923 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
924 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
927 [ C(RESULT_ACCESS) ] = -1,
928 [ C(RESULT_MISS) ] = -1,
930 [ C(OP_PREFETCH) ] = {
931 [ C(RESULT_ACCESS) ] = -1,
932 [ C(RESULT_MISS) ] = -1,
937 [ C(RESULT_ACCESS) ] = 0x01b7,
938 [ C(RESULT_MISS) ] = 0x01b7,
941 [ C(RESULT_ACCESS) ] = 0x01b7,
942 [ C(RESULT_MISS) ] = 0x01b7,
944 [ C(OP_PREFETCH) ] = {
945 [ C(RESULT_ACCESS) ] = 0x01b7,
946 [ C(RESULT_MISS) ] = 0x01b7,
/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliable.
 */
961 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
962 #define HSW_DEMAND_RFO BIT_ULL(1)
963 #define HSW_ANY_RESPONSE BIT_ULL(16)
964 #define HSW_SUPPLIER_NONE BIT_ULL(17)
965 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
966 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
967 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
968 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
969 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
970 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
971 HSW_L3_MISS_REMOTE_HOP2P)
972 #define HSW_SNOOP_NONE BIT_ULL(31)
973 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
974 #define HSW_SNOOP_MISS BIT_ULL(33)
975 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
976 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
977 #define HSW_SNOOP_HITM BIT_ULL(36)
978 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
979 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
980 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
981 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
982 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
983 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
984 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
985 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
986 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
987 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
988 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
990 #define BDW_L3_MISS_LOCAL BIT(26)
991 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
992 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
993 HSW_L3_MISS_REMOTE_HOP2P)
996 static __initconst const u64 hsw_hw_cache_event_ids
997 [PERF_COUNT_HW_CACHE_MAX]
998 [PERF_COUNT_HW_CACHE_OP_MAX]
999 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1003 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1004 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
1007 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1008 [ C(RESULT_MISS) ] = 0x0,
1010 [ C(OP_PREFETCH) ] = {
1011 [ C(RESULT_ACCESS) ] = 0x0,
1012 [ C(RESULT_MISS) ] = 0x0,
1017 [ C(RESULT_ACCESS) ] = 0x0,
1018 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
1021 [ C(RESULT_ACCESS) ] = -1,
1022 [ C(RESULT_MISS) ] = -1,
1024 [ C(OP_PREFETCH) ] = {
1025 [ C(RESULT_ACCESS) ] = 0x0,
1026 [ C(RESULT_MISS) ] = 0x0,
1031 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1032 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1035 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1036 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1038 [ C(OP_PREFETCH) ] = {
1039 [ C(RESULT_ACCESS) ] = 0x0,
1040 [ C(RESULT_MISS) ] = 0x0,
1045 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1046 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1049 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1050 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1052 [ C(OP_PREFETCH) ] = {
1053 [ C(RESULT_ACCESS) ] = 0x0,
1054 [ C(RESULT_MISS) ] = 0x0,
1059 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
1060 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1063 [ C(RESULT_ACCESS) ] = -1,
1064 [ C(RESULT_MISS) ] = -1,
1066 [ C(OP_PREFETCH) ] = {
1067 [ C(RESULT_ACCESS) ] = -1,
1068 [ C(RESULT_MISS) ] = -1,
1073 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
1074 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1077 [ C(RESULT_ACCESS) ] = -1,
1078 [ C(RESULT_MISS) ] = -1,
1080 [ C(OP_PREFETCH) ] = {
1081 [ C(RESULT_ACCESS) ] = -1,
1082 [ C(RESULT_MISS) ] = -1,
1087 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1088 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1091 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1092 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1094 [ C(OP_PREFETCH) ] = {
1095 [ C(RESULT_ACCESS) ] = 0x0,
1096 [ C(RESULT_MISS) ] = 0x0,
1101 static __initconst const u64 hsw_hw_cache_extra_regs
1102 [PERF_COUNT_HW_CACHE_MAX]
1103 [PERF_COUNT_HW_CACHE_OP_MAX]
1104 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1108 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1110 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1111 HSW_L3_MISS|HSW_ANY_SNOOP,
1114 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1116 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1117 HSW_L3_MISS|HSW_ANY_SNOOP,
1119 [ C(OP_PREFETCH) ] = {
1120 [ C(RESULT_ACCESS) ] = 0x0,
1121 [ C(RESULT_MISS) ] = 0x0,
1126 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1127 HSW_L3_MISS_LOCAL_DRAM|
1129 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1134 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1135 HSW_L3_MISS_LOCAL_DRAM|
1137 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1141 [ C(OP_PREFETCH) ] = {
1142 [ C(RESULT_ACCESS) ] = 0x0,
1143 [ C(RESULT_MISS) ] = 0x0,
1148 static __initconst const u64 westmere_hw_cache_event_ids
1149 [PERF_COUNT_HW_CACHE_MAX]
1150 [PERF_COUNT_HW_CACHE_OP_MAX]
1151 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1155 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1156 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1160 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1162 [ C(OP_PREFETCH) ] = {
1163 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1164 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1169 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1170 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1173 [ C(RESULT_ACCESS) ] = -1,
1174 [ C(RESULT_MISS) ] = -1,
1176 [ C(OP_PREFETCH) ] = {
1177 [ C(RESULT_ACCESS) ] = 0x0,
1178 [ C(RESULT_MISS) ] = 0x0,
1183 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1184 [ C(RESULT_ACCESS) ] = 0x01b7,
1185 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1186 [ C(RESULT_MISS) ] = 0x01b7,
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
1193 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1194 [ C(RESULT_ACCESS) ] = 0x01b7,
1195 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1196 [ C(RESULT_MISS) ] = 0x01b7,
1198 [ C(OP_PREFETCH) ] = {
1199 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1200 [ C(RESULT_ACCESS) ] = 0x01b7,
1201 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1202 [ C(RESULT_MISS) ] = 0x01b7,
1207 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1208 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1212 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1214 [ C(OP_PREFETCH) ] = {
1215 [ C(RESULT_ACCESS) ] = 0x0,
1216 [ C(RESULT_MISS) ] = 0x0,
1221 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1222 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1225 [ C(RESULT_ACCESS) ] = -1,
1226 [ C(RESULT_MISS) ] = -1,
1228 [ C(OP_PREFETCH) ] = {
1229 [ C(RESULT_ACCESS) ] = -1,
1230 [ C(RESULT_MISS) ] = -1,
1235 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1236 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1239 [ C(RESULT_ACCESS) ] = -1,
1240 [ C(RESULT_MISS) ] = -1,
1242 [ C(OP_PREFETCH) ] = {
1243 [ C(RESULT_ACCESS) ] = -1,
1244 [ C(RESULT_MISS) ] = -1,
1249 [ C(RESULT_ACCESS) ] = 0x01b7,
1250 [ C(RESULT_MISS) ] = 0x01b7,
1253 [ C(RESULT_ACCESS) ] = 0x01b7,
1254 [ C(RESULT_MISS) ] = 0x01b7,
1256 [ C(OP_PREFETCH) ] = {
1257 [ C(RESULT_ACCESS) ] = 0x01b7,
1258 [ C(RESULT_MISS) ] = 0x01b7,
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
1268 #define NHM_DMND_DATA_RD (1 << 0)
1269 #define NHM_DMND_RFO (1 << 1)
1270 #define NHM_DMND_IFETCH (1 << 2)
1271 #define NHM_DMND_WB (1 << 3)
1272 #define NHM_PF_DATA_RD (1 << 4)
1273 #define NHM_PF_DATA_RFO (1 << 5)
1274 #define NHM_PF_IFETCH (1 << 6)
1275 #define NHM_OFFCORE_OTHER (1 << 7)
1276 #define NHM_UNCORE_HIT (1 << 8)
1277 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1278 #define NHM_OTHER_CORE_HITM (1 << 10)
1280 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1281 #define NHM_REMOTE_DRAM (1 << 13)
1282 #define NHM_LOCAL_DRAM (1 << 14)
1283 #define NHM_NON_DRAM (1 << 15)
1285 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1286 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1288 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1289 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1290 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1292 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1293 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1294 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
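/*
 * Illustrative sketch only (not part of the driver): each extra_reg value
 * in the table below is the OR of one request-type group and one
 * response-type group from the bits above, e.g. an LLC demand-read miss:
 */
#if 0
static const u64 nhm_llc_demand_read_miss_example =
		NHM_DMND_READ | NHM_L3_MISS;
#endif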
1296 static __initconst const u64 nehalem_hw_cache_extra_regs
1297 [PERF_COUNT_HW_CACHE_MAX]
1298 [PERF_COUNT_HW_CACHE_OP_MAX]
1299 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1303 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1304 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1307 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1308 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1310 [ C(OP_PREFETCH) ] = {
1311 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1312 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1317 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1318 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1321 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1322 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1324 [ C(OP_PREFETCH) ] = {
1325 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1326 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1331 static __initconst const u64 nehalem_hw_cache_event_ids
1332 [PERF_COUNT_HW_CACHE_MAX]
1333 [PERF_COUNT_HW_CACHE_OP_MAX]
1334 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1338 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1339 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1343 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1345 [ C(OP_PREFETCH) ] = {
1346 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1347 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1352 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1353 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1356 [ C(RESULT_ACCESS) ] = -1,
1357 [ C(RESULT_MISS) ] = -1,
1359 [ C(OP_PREFETCH) ] = {
1360 [ C(RESULT_ACCESS) ] = 0x0,
1361 [ C(RESULT_MISS) ] = 0x0,
1366 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1367 [ C(RESULT_ACCESS) ] = 0x01b7,
1368 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1369 [ C(RESULT_MISS) ] = 0x01b7,
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
1376 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1377 [ C(RESULT_ACCESS) ] = 0x01b7,
1378 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1379 [ C(RESULT_MISS) ] = 0x01b7,
1381 [ C(OP_PREFETCH) ] = {
1382 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1383 [ C(RESULT_ACCESS) ] = 0x01b7,
1384 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1385 [ C(RESULT_MISS) ] = 0x01b7,
1390 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1391 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1394 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1395 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1397 [ C(OP_PREFETCH) ] = {
1398 [ C(RESULT_ACCESS) ] = 0x0,
1399 [ C(RESULT_MISS) ] = 0x0,
1404 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1405 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1408 [ C(RESULT_ACCESS) ] = -1,
1409 [ C(RESULT_MISS) ] = -1,
1411 [ C(OP_PREFETCH) ] = {
1412 [ C(RESULT_ACCESS) ] = -1,
1413 [ C(RESULT_MISS) ] = -1,
1418 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1419 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1422 [ C(RESULT_ACCESS) ] = -1,
1423 [ C(RESULT_MISS) ] = -1,
1425 [ C(OP_PREFETCH) ] = {
1426 [ C(RESULT_ACCESS) ] = -1,
1427 [ C(RESULT_MISS) ] = -1,
1432 [ C(RESULT_ACCESS) ] = 0x01b7,
1433 [ C(RESULT_MISS) ] = 0x01b7,
1436 [ C(RESULT_ACCESS) ] = 0x01b7,
1437 [ C(RESULT_MISS) ] = 0x01b7,
1439 [ C(OP_PREFETCH) ] = {
1440 [ C(RESULT_ACCESS) ] = 0x01b7,
1441 [ C(RESULT_MISS) ] = 0x01b7,
1446 static __initconst const u64 core2_hw_cache_event_ids
1447 [PERF_COUNT_HW_CACHE_MAX]
1448 [PERF_COUNT_HW_CACHE_OP_MAX]
1449 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1453 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1454 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1457 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1458 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1460 [ C(OP_PREFETCH) ] = {
1461 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1462 [ C(RESULT_MISS) ] = 0,
1467 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1468 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1471 [ C(RESULT_ACCESS) ] = -1,
1472 [ C(RESULT_MISS) ] = -1,
1474 [ C(OP_PREFETCH) ] = {
1475 [ C(RESULT_ACCESS) ] = 0,
1476 [ C(RESULT_MISS) ] = 0,
1481 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1482 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1485 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1486 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1488 [ C(OP_PREFETCH) ] = {
1489 [ C(RESULT_ACCESS) ] = 0,
1490 [ C(RESULT_MISS) ] = 0,
1495 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1496 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1499 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1500 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1502 [ C(OP_PREFETCH) ] = {
1503 [ C(RESULT_ACCESS) ] = 0,
1504 [ C(RESULT_MISS) ] = 0,
1509 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1510 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1513 [ C(RESULT_ACCESS) ] = -1,
1514 [ C(RESULT_MISS) ] = -1,
1516 [ C(OP_PREFETCH) ] = {
1517 [ C(RESULT_ACCESS) ] = -1,
1518 [ C(RESULT_MISS) ] = -1,
1523 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1524 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1527 [ C(RESULT_ACCESS) ] = -1,
1528 [ C(RESULT_MISS) ] = -1,
1530 [ C(OP_PREFETCH) ] = {
1531 [ C(RESULT_ACCESS) ] = -1,
1532 [ C(RESULT_MISS) ] = -1,
1537 static __initconst const u64 atom_hw_cache_event_ids
1538 [PERF_COUNT_HW_CACHE_MAX]
1539 [PERF_COUNT_HW_CACHE_OP_MAX]
1540 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1544 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1545 [ C(RESULT_MISS) ] = 0,
1548 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1549 [ C(RESULT_MISS) ] = 0,
1551 [ C(OP_PREFETCH) ] = {
1552 [ C(RESULT_ACCESS) ] = 0x0,
1553 [ C(RESULT_MISS) ] = 0,
1558 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1559 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1562 [ C(RESULT_ACCESS) ] = -1,
1563 [ C(RESULT_MISS) ] = -1,
1565 [ C(OP_PREFETCH) ] = {
1566 [ C(RESULT_ACCESS) ] = 0,
1567 [ C(RESULT_MISS) ] = 0,
1572 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1573 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1576 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1577 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1579 [ C(OP_PREFETCH) ] = {
1580 [ C(RESULT_ACCESS) ] = 0,
1581 [ C(RESULT_MISS) ] = 0,
1586 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1587 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1590 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1591 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1593 [ C(OP_PREFETCH) ] = {
1594 [ C(RESULT_ACCESS) ] = 0,
1595 [ C(RESULT_MISS) ] = 0,
1600 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1601 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1604 [ C(RESULT_ACCESS) ] = -1,
1605 [ C(RESULT_MISS) ] = -1,
1607 [ C(OP_PREFETCH) ] = {
1608 [ C(RESULT_ACCESS) ] = -1,
1609 [ C(RESULT_MISS) ] = -1,
1614 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1615 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1618 [ C(RESULT_ACCESS) ] = -1,
1619 [ C(RESULT_MISS) ] = -1,
1621 [ C(OP_PREFETCH) ] = {
1622 [ C(RESULT_ACCESS) ] = -1,
1623 [ C(RESULT_MISS) ] = -1,
1628 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1629 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1630 /* no_alloc_cycles.not_delivered */
1631 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1632 "event=0xca,umask=0x50");
1633 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1634 /* uops_retired.all */
1635 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1636 "event=0xc2,umask=0x10");
1637 /* uops_retired.all */
1638 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1639 "event=0xc2,umask=0x10");
1641 static struct attribute *slm_events_attrs[] = {
1642 EVENT_PTR(td_total_slots_slm),
1643 EVENT_PTR(td_total_slots_scale_slm),
1644 EVENT_PTR(td_fetch_bubbles_slm),
1645 EVENT_PTR(td_fetch_bubbles_scale_slm),
1646 EVENT_PTR(td_slots_issued_slm),
1647 EVENT_PTR(td_slots_retired_slm),
1651 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1653 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1654 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1655 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1659 #define SLM_DMND_READ SNB_DMND_DATA_RD
1660 #define SLM_DMND_WRITE SNB_DMND_RFO
1661 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1663 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1664 #define SLM_LLC_ACCESS SNB_RESP_ANY
1665 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1667 static __initconst const u64 slm_hw_cache_extra_regs
1668 [PERF_COUNT_HW_CACHE_MAX]
1669 [PERF_COUNT_HW_CACHE_OP_MAX]
1670 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1674 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1675 [ C(RESULT_MISS) ] = 0,
1678 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1679 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1681 [ C(OP_PREFETCH) ] = {
1682 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1683 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1688 static __initconst const u64 slm_hw_cache_event_ids
1689 [PERF_COUNT_HW_CACHE_MAX]
1690 [PERF_COUNT_HW_CACHE_OP_MAX]
1691 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1695 [ C(RESULT_ACCESS) ] = 0,
1696 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1699 [ C(RESULT_ACCESS) ] = 0,
1700 [ C(RESULT_MISS) ] = 0,
1702 [ C(OP_PREFETCH) ] = {
1703 [ C(RESULT_ACCESS) ] = 0,
1704 [ C(RESULT_MISS) ] = 0,
1709 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1713 [ C(RESULT_ACCESS) ] = -1,
1714 [ C(RESULT_MISS) ] = -1,
1716 [ C(OP_PREFETCH) ] = {
1717 [ C(RESULT_ACCESS) ] = 0,
1718 [ C(RESULT_MISS) ] = 0,
1723 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1724 [ C(RESULT_ACCESS) ] = 0x01b7,
1725 [ C(RESULT_MISS) ] = 0,
1728 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1729 [ C(RESULT_ACCESS) ] = 0x01b7,
1730 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1731 [ C(RESULT_MISS) ] = 0x01b7,
1733 [ C(OP_PREFETCH) ] = {
1734 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1735 [ C(RESULT_ACCESS) ] = 0x01b7,
1736 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1737 [ C(RESULT_MISS) ] = 0x01b7,
1742 [ C(RESULT_ACCESS) ] = 0,
1743 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1746 [ C(RESULT_ACCESS) ] = 0,
1747 [ C(RESULT_MISS) ] = 0,
1749 [ C(OP_PREFETCH) ] = {
1750 [ C(RESULT_ACCESS) ] = 0,
1751 [ C(RESULT_MISS) ] = 0,
1756 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1757 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1760 [ C(RESULT_ACCESS) ] = -1,
1761 [ C(RESULT_MISS) ] = -1,
1763 [ C(OP_PREFETCH) ] = {
1764 [ C(RESULT_ACCESS) ] = -1,
1765 [ C(RESULT_MISS) ] = -1,
1770 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1771 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1774 [ C(RESULT_ACCESS) ] = -1,
1775 [ C(RESULT_MISS) ] = -1,
1777 [ C(OP_PREFETCH) ] = {
1778 [ C(RESULT_ACCESS) ] = -1,
1779 [ C(RESULT_MISS) ] = -1,
1784 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1785 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1786 /* UOPS_NOT_DELIVERED.ANY */
1787 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1788 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1789 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1790 /* UOPS_RETIRED.ANY */
1791 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1792 /* UOPS_ISSUED.ANY */
1793 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1795 static struct attribute *glm_events_attrs[] = {
1796 EVENT_PTR(td_total_slots_glm),
1797 EVENT_PTR(td_total_slots_scale_glm),
1798 EVENT_PTR(td_fetch_bubbles_glm),
1799 EVENT_PTR(td_recovery_bubbles_glm),
1800 EVENT_PTR(td_slots_issued_glm),
1801 EVENT_PTR(td_slots_retired_glm),
1805 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1806 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1807 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1808 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1812 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1813 #define GLM_DEMAND_RFO BIT_ULL(1)
1814 #define GLM_ANY_RESPONSE BIT_ULL(16)
1815 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1816 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1817 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1818 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1819 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1820 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1821 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1823 static __initconst const u64 glm_hw_cache_event_ids
1824 [PERF_COUNT_HW_CACHE_MAX]
1825 [PERF_COUNT_HW_CACHE_OP_MAX]
1826 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1829 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1830 [C(RESULT_MISS)] = 0x0,
1833 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1834 [C(RESULT_MISS)] = 0x0,
1836 [C(OP_PREFETCH)] = {
1837 [C(RESULT_ACCESS)] = 0x0,
1838 [C(RESULT_MISS)] = 0x0,
1843 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1844 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1847 [C(RESULT_ACCESS)] = -1,
1848 [C(RESULT_MISS)] = -1,
1850 [C(OP_PREFETCH)] = {
1851 [C(RESULT_ACCESS)] = 0x0,
1852 [C(RESULT_MISS)] = 0x0,
1857 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1858 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1861 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1862 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1864 [C(OP_PREFETCH)] = {
1865 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1866 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1871 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1872 [C(RESULT_MISS)] = 0x0,
1875 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1876 [C(RESULT_MISS)] = 0x0,
1878 [C(OP_PREFETCH)] = {
1879 [C(RESULT_ACCESS)] = 0x0,
1880 [C(RESULT_MISS)] = 0x0,
1885 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1886 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1889 [C(RESULT_ACCESS)] = -1,
1890 [C(RESULT_MISS)] = -1,
1892 [C(OP_PREFETCH)] = {
1893 [C(RESULT_ACCESS)] = -1,
1894 [C(RESULT_MISS)] = -1,
1899 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1900 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1903 [C(RESULT_ACCESS)] = -1,
1904 [C(RESULT_MISS)] = -1,
1906 [C(OP_PREFETCH)] = {
1907 [C(RESULT_ACCESS)] = -1,
1908 [C(RESULT_MISS)] = -1,
1913 static __initconst const u64 glm_hw_cache_extra_regs
1914 [PERF_COUNT_HW_CACHE_MAX]
1915 [PERF_COUNT_HW_CACHE_OP_MAX]
1916 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1919 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1921 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1925 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1927 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1930 [C(OP_PREFETCH)] = {
1931 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1933 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1939 static __initconst const u64 glp_hw_cache_event_ids
1940 [PERF_COUNT_HW_CACHE_MAX]
1941 [PERF_COUNT_HW_CACHE_OP_MAX]
1942 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1945 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1946 [C(RESULT_MISS)] = 0x0,
1949 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1950 [C(RESULT_MISS)] = 0x0,
1952 [C(OP_PREFETCH)] = {
1953 [C(RESULT_ACCESS)] = 0x0,
1954 [C(RESULT_MISS)] = 0x0,
1959 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1960 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1963 [C(RESULT_ACCESS)] = -1,
1964 [C(RESULT_MISS)] = -1,
1966 [C(OP_PREFETCH)] = {
1967 [C(RESULT_ACCESS)] = 0x0,
1968 [C(RESULT_MISS)] = 0x0,
1973 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1974 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1977 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1978 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1980 [C(OP_PREFETCH)] = {
1981 [C(RESULT_ACCESS)] = 0x0,
1982 [C(RESULT_MISS)] = 0x0,
1987 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1988 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1991 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1992 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
1994 [C(OP_PREFETCH)] = {
1995 [C(RESULT_ACCESS)] = 0x0,
1996 [C(RESULT_MISS)] = 0x0,
2001 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2002 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2005 [C(RESULT_ACCESS)] = -1,
2006 [C(RESULT_MISS)] = -1,
2008 [C(OP_PREFETCH)] = {
2009 [C(RESULT_ACCESS)] = -1,
2010 [C(RESULT_MISS)] = -1,
2015 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2016 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2019 [C(RESULT_ACCESS)] = -1,
2020 [C(RESULT_MISS)] = -1,
2022 [C(OP_PREFETCH)] = {
2023 [C(RESULT_ACCESS)] = -1,
2024 [C(RESULT_MISS)] = -1,
2029 static __initconst const u64 glp_hw_cache_extra_regs
2030 [PERF_COUNT_HW_CACHE_MAX]
2031 [PERF_COUNT_HW_CACHE_OP_MAX]
2032 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2035 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2037 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2041 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2043 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2046 [C(OP_PREFETCH)] = {
2047 [C(RESULT_ACCESS)] = 0x0,
2048 [C(RESULT_MISS)] = 0x0,
2053 #define TNT_LOCAL_DRAM BIT_ULL(26)
2054 #define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2055 #define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2056 #define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2057 #define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2058 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2059 #define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2061 static __initconst const u64 tnt_hw_cache_extra_regs
2062 [PERF_COUNT_HW_CACHE_MAX]
2063 [PERF_COUNT_HW_CACHE_OP_MAX]
2064 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2067 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2069 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2073 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2075 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2078 [C(OP_PREFETCH)] = {
2079 [C(RESULT_ACCESS)] = 0x0,
2080 [C(RESULT_MISS)] = 0x0,
2085 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2086 EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2087 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2088 EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2090 static struct attribute *tnt_events_attrs[] = {
2091 EVENT_PTR(td_fe_bound_tnt),
2092 EVENT_PTR(td_retiring_tnt),
2093 EVENT_PTR(td_bad_spec_tnt),
2094 EVENT_PTR(td_be_bound_tnt),
2098 static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2099 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2100 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2101 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2105 static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2106 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2107 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2108 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2109 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2113 #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2114 #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2115 #define KNL_MCDRAM_LOCAL BIT_ULL(21)
2116 #define KNL_MCDRAM_FAR BIT_ULL(22)
2117 #define KNL_DDR_LOCAL BIT_ULL(23)
2118 #define KNL_DDR_FAR BIT_ULL(24)
2119 #define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2120 KNL_DDR_LOCAL | KNL_DDR_FAR)
2121 #define KNL_L2_READ SLM_DMND_READ
2122 #define KNL_L2_WRITE SLM_DMND_WRITE
2123 #define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2124 #define KNL_L2_ACCESS SLM_LLC_ACCESS
2125 #define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2126 KNL_DRAM_ANY | SNB_SNP_ANY | \
2129 static __initconst const u64 knl_hw_cache_extra_regs
2130 [PERF_COUNT_HW_CACHE_MAX]
2131 [PERF_COUNT_HW_CACHE_OP_MAX]
2132 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2135 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2136 [C(RESULT_MISS)] = 0,
2139 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2140 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2142 [C(OP_PREFETCH)] = {
2143 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2144 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2150 * Used from PMIs where the LBRs are already disabled.
2152 * This function may be called consecutively. It must leave the PMU in the
2153 * disabled state across consecutive calls.
2155 * During consecutive calls, the same disable value is written to the related
2156 * registers, so the PMU state remains unchanged.
2158 * intel_bts events don't coexist with intel PMU's BTS events because of
2159 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2160 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2162 * Avoid PEBS_ENABLE MSR access in PMIs.
2163 * The GLOBAL_CTRL has been disabled. None of the counters count anymore.
2164 * It doesn't matter whether PEBS is enabled or not.
2165 * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2166 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2167 * However, there are some cases which may change PEBS status, e.g. PMI
2168 * throttle. The PEBS_ENABLE should be updated where the status changes.
2170 static __always_inline void __intel_pmu_disable_all(bool bts)
2172 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2174 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2176 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2177 intel_pmu_disable_bts();
2180 static __always_inline void intel_pmu_disable_all(void)
2182 __intel_pmu_disable_all(true);
2183 intel_pmu_pebs_disable_all();
2184 intel_pmu_lbr_disable_all();
2187 static void __intel_pmu_enable_all(int added, bool pmi)
2189 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2190 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2192 intel_pmu_lbr_enable_all(pmi);
2193 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2194 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2196 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2197 struct perf_event *event =
2198 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2200 if (WARN_ON_ONCE(!event))
2203 intel_pmu_enable_bts(event->hw.config);
2207 static void intel_pmu_enable_all(int added)
2209 intel_pmu_pebs_enable_all();
2210 __intel_pmu_enable_all(added, false);
2214 __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2215 unsigned int cnt, unsigned long flags)
2217 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2219 intel_pmu_lbr_read();
2220 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2222 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2223 intel_pmu_enable_all(0);
2224 local_irq_restore(flags);
2229 intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2231 unsigned long flags;
2233 /* must not have branches... */
2234 local_irq_save(flags);
2235 __intel_pmu_disable_all(false); /* we don't care about BTS */
2236 __intel_pmu_lbr_disable();
2237 /* ... until here */
2238 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2242 intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2244 unsigned long flags;
2246 /* must not have branches... */
2247 local_irq_save(flags);
2248 __intel_pmu_disable_all(false); /* we don't care about BTS */
2249 __intel_pmu_arch_lbr_disable();
2250 /* ... until here */
2251 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2256 * Intel Errata AAK100 (model 26)
2257 * Intel Errata AAP53 (model 30)
2258 * Intel Errata BD53 (model 44)
2260 * The official story:
2261 * These chips need to be 'reset' when adding counters by programming the
2262 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2263 * in sequence on the same PMC or on different PMCs.
2265 * In practice it appears some of these events do in fact count, and
2266 * we need to program all 4 events.
2268 static void intel_pmu_nhm_workaround(void)
2270 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2271 static const unsigned long nhm_magic[4] = {
2277 struct perf_event *event;
2281 * The erratum requires the following steps:
2282 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2283 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2284 * the corresponding PMCx;
2285 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2286 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2287 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2291 * The real steps we choose are a little different from above.
2292 * A) To reduce MSR operations, we don't run step 1) as they
2293 * are already cleared before this function is called;
2294 * B) Call x86_perf_event_update to save PMCx before configuring
2295 * PERFEVTSELx with magic number;
2296 * C) With step 5), we clear a PERFEVTSELx only when it is
2297 * not currently in use.
2298 * D) Call x86_perf_event_set_period to restore PMCx;
2301 /* We always operate 4 pairs of PERF Counters */
2302 for (i = 0; i < 4; i++) {
2303 event = cpuc->events[i];
2305 x86_perf_event_update(event);
2308 for (i = 0; i < 4; i++) {
2309 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2310 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2313 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2314 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2316 for (i = 0; i < 4; i++) {
2317 event = cpuc->events[i];
2320 x86_perf_event_set_period(event);
2321 __x86_pmu_enable_event(&event->hw,
2322 ARCH_PERFMON_EVENTSEL_ENABLE);
2324 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2328 static void intel_pmu_nhm_enable_all(int added)
2331 intel_pmu_nhm_workaround();
2332 intel_pmu_enable_all(added);
2335 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2337 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2339 if (cpuc->tfa_shadow != val) {
2340 cpuc->tfa_shadow = val;
2341 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2345 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2348 * We're going to use PMC3; make sure TFA is set before we touch it.
2351 intel_set_tfa(cpuc, true);
2354 static void intel_tfa_pmu_enable_all(int added)
2356 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2359 * If we find PMC3 is no longer used when we enable the PMU, we can
2362 if (!test_bit(3, cpuc->active_mask))
2363 intel_set_tfa(cpuc, false);
2365 intel_pmu_enable_all(added);
2368 static inline u64 intel_pmu_get_status(void)
2372 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2377 static inline void intel_pmu_ack_status(u64 ack)
2379 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2382 static inline bool event_is_checkpointed(struct perf_event *event)
2384 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2387 static inline void intel_set_masks(struct perf_event *event, int idx)
2389 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2391 if (event->attr.exclude_host)
2392 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2393 if (event->attr.exclude_guest)
2394 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2395 if (event_is_checkpointed(event))
2396 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2399 static inline void intel_clear_masks(struct perf_event *event, int idx)
2401 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2403 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2404 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2405 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2408 static void intel_pmu_disable_fixed(struct perf_event *event)
2410 struct hw_perf_event *hwc = &event->hw;
2414 if (is_topdown_idx(idx)) {
2415 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2418 * When there are other active TopDown events,
2419 * don't disable the fixed counter 3.
2421 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2423 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2426 intel_clear_masks(event, idx);
2428 mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
2429 rdmsrl(hwc->config_base, ctrl_val);
2431 wrmsrl(hwc->config_base, ctrl_val);
2434 static void intel_pmu_disable_event(struct perf_event *event)
2436 struct hw_perf_event *hwc = &event->hw;
2440 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2441 intel_clear_masks(event, idx);
2442 x86_pmu_disable_event(event);
2444 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2445 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2446 intel_pmu_disable_fixed(event);
2448 case INTEL_PMC_IDX_FIXED_BTS:
2449 intel_pmu_disable_bts();
2450 intel_pmu_drain_bts_buffer();
2452 case INTEL_PMC_IDX_FIXED_VLBR:
2453 intel_clear_masks(event, idx);
2456 intel_clear_masks(event, idx);
2457 pr_warn("Failed to disable the event with invalid index %d\n",
2463 * Needs to be called after x86_pmu_disable_event,
2464 * so we don't trigger the event without PEBS bit set.
2466 if (unlikely(event->attr.precise_ip))
2467 intel_pmu_pebs_disable(event);
2470 static void intel_pmu_assign_event(struct perf_event *event, int idx)
2472 if (is_pebs_pt(event))
2473 perf_report_aux_output_id(event, idx);
2476 static void intel_pmu_del_event(struct perf_event *event)
2478 if (needs_branch_stack(event))
2479 intel_pmu_lbr_del(event);
2480 if (event->attr.precise_ip)
2481 intel_pmu_pebs_del(event);
2484 static int icl_set_topdown_event_period(struct perf_event *event)
2486 struct hw_perf_event *hwc = &event->hw;
2487 s64 left = local64_read(&hwc->period_left);
2490 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2491 * Software should start both registers, PERF_METRICS and fixed
2492 * counter 3, from zero.
2493 * Clear PERF_METRICS and Fixed counter 3 in initialization.
2494 * After that, both MSRs will be cleared for each read.
2495 * Don't need to clear them again.
2497 if (left == x86_pmu.max_period) {
2498 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2499 wrmsrl(MSR_PERF_METRICS, 0);
2500 hwc->saved_slots = 0;
2501 hwc->saved_metric = 0;
2504 if ((hwc->saved_slots) && is_slots_event(event)) {
2505 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2506 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2509 perf_event_update_userpage(event);
2514 static int adl_set_topdown_event_period(struct perf_event *event)
2516 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2518 if (pmu->cpu_type != hybrid_big)
2521 return icl_set_topdown_event_period(event);
2524 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2529 * The metric is reported as an 8bit integer fraction
2530 * summing up to 0xff.
2531 * slots-in-metric = (Metric / 0xff) * slots
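 *
 * Illustrative example (not from the original source): with a SLOTS
 * delta of 1000 and a metric byte of 0x80 for this event,
 * slots-in-metric = 1000 * 0x80 / 0xff, which truncates to 501.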
2533 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2534 return mul_u64_u32_div(slots, val, 0xff);
2537 static u64 icl_get_topdown_value(struct perf_event *event,
2538 u64 slots, u64 metrics)
2540 int idx = event->hw.idx;
2543 if (is_metric_idx(idx))
2544 delta = icl_get_metrics_event_value(metrics, slots, idx);
2551 static void __icl_update_topdown_event(struct perf_event *event,
2552 u64 slots, u64 metrics,
2553 u64 last_slots, u64 last_metrics)
2555 u64 delta, last = 0;
2557 delta = icl_get_topdown_value(event, slots, metrics);
2559 last = icl_get_topdown_value(event, last_slots, last_metrics);
2562 * The 8-bit integer fraction of a metric may not be accurate,
2563 * especially when the change is very small.
2564 * For example, if only a few bad_spec events happen, the fraction
2565 * may be reduced from 1 to 0. If so, the bad_spec event value
2566 * will be 0, which is definitely less than the last value.
2567 * Avoid updating event->count in this case.
2571 local64_add(delta, &event->count);
2575 static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2576 u64 metrics, int metric_end)
2578 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2579 struct perf_event *other;
2582 event->hw.saved_slots = slots;
2583 event->hw.saved_metric = metrics;
2585 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2586 if (!is_topdown_idx(idx))
2588 other = cpuc->events[idx];
2589 other->hw.saved_slots = slots;
2590 other->hw.saved_metric = metrics;
2595 * Update all active Topdown events.
2597 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2598 * modified by an NMI. The PMU has to be disabled before calling this function.
2601 static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2603 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2604 struct perf_event *other;
2609 /* read Fixed counter 3 */
2610 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2614 /* read PERF_METRICS */
2615 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2617 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2618 if (!is_topdown_idx(idx))
2620 other = cpuc->events[idx];
2621 __icl_update_topdown_event(other, slots, metrics,
2622 event ? event->hw.saved_slots : 0,
2623 event ? event->hw.saved_metric : 0);
2627 * Check and update this event, which may have been cleared
2628 * in active_mask, e.g. by x86_pmu_stop().
2630 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2631 __icl_update_topdown_event(event, slots, metrics,
2632 event->hw.saved_slots,
2633 event->hw.saved_metric);
2636 * In x86_pmu_stop(), the event is cleared in active_mask first,
2637 * then drain the delta, which indicates context switch for
2639 * Save the metric and slots values for the context switch.
2640 * There is no need to reset PERF_METRICS and Fixed counter 3,
2641 * because the values will be restored on the next schedule-in.
2643 update_saved_topdown_regs(event, slots, metrics, metric_end);
2648 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2649 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2650 wrmsrl(MSR_PERF_METRICS, 0);
2652 update_saved_topdown_regs(event, 0, 0, metric_end);
2658 static u64 icl_update_topdown_event(struct perf_event *event)
2660 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2661 x86_pmu.num_topdown_events - 1);
2664 static u64 adl_update_topdown_event(struct perf_event *event)
2666 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2668 if (pmu->cpu_type != hybrid_big)
2671 return icl_update_topdown_event(event);
2675 static void intel_pmu_read_topdown_event(struct perf_event *event)
2677 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2679 /* Only need to call update_topdown_event() once for group read. */
2680 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2681 !is_slots_event(event))
2684 perf_pmu_disable(event->pmu);
2685 x86_pmu.update_topdown_event(event);
2686 perf_pmu_enable(event->pmu);
2689 static void intel_pmu_read_event(struct perf_event *event)
2691 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2692 intel_pmu_auto_reload_read(event);
2693 else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
2694 intel_pmu_read_topdown_event(event);
2696 x86_perf_event_update(event);
2699 static void intel_pmu_enable_fixed(struct perf_event *event)
2701 struct hw_perf_event *hwc = &event->hw;
2702 u64 ctrl_val, mask, bits = 0;
2705 if (is_topdown_idx(idx)) {
2706 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2708 * When there are other active TopDown events,
2709 * don't enable the fixed counter 3 again.
2711 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2714 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2717 intel_set_masks(event, idx);
2720 * Enable IRQ generation (0x8), if not PEBS,
2721 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
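 *
 * Worked example (illustrative, not from the original source): a
 * non-PEBS fixed event counting both user and kernel ends up with
 * bits == 0x8 | 0x2 | 0x1 == 0xb in its 4-bit control field.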
2724 if (!event->attr.precise_ip)
2726 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2728 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2732 * ANY bit is supported in v3 and up
2734 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2737 idx -= INTEL_PMC_IDX_FIXED;
2739 mask = 0xfULL << (idx * 4);
2741 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2742 bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2743 mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2746 rdmsrl(hwc->config_base, ctrl_val);
2749 wrmsrl(hwc->config_base, ctrl_val);
2752 static void intel_pmu_enable_event(struct perf_event *event)
2754 struct hw_perf_event *hwc = &event->hw;
2757 if (unlikely(event->attr.precise_ip))
2758 intel_pmu_pebs_enable(event);
2761 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2762 intel_set_masks(event, idx);
2763 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2765 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2766 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2767 intel_pmu_enable_fixed(event);
2769 case INTEL_PMC_IDX_FIXED_BTS:
2770 if (!__this_cpu_read(cpu_hw_events.enabled))
2772 intel_pmu_enable_bts(hwc->config);
2774 case INTEL_PMC_IDX_FIXED_VLBR:
2775 intel_set_masks(event, idx);
2778 pr_warn("Failed to enable the event with invalid index %d\n",
2783 static void intel_pmu_add_event(struct perf_event *event)
2785 if (event->attr.precise_ip)
2786 intel_pmu_pebs_add(event);
2787 if (needs_branch_stack(event))
2788 intel_pmu_lbr_add(event);
2792 * Save and restart an expired event. Called by NMI contexts,
2793 * so it has to be careful about preempting normal event ops:
2795 int intel_pmu_save_and_restart(struct perf_event *event)
2797 x86_perf_event_update(event);
2799 * For a checkpointed counter always reset back to 0. This
2800 * avoids a situation where the counter overflows, aborts the
2801 * transaction and is then set back to shortly before the
2802 * overflow, and overflows and aborts again.
2804 if (unlikely(event_is_checkpointed(event))) {
2805 /* No race with NMIs because the counter should not be armed */
2806 wrmsrl(event->hw.event_base, 0);
2807 local64_set(&event->hw.prev_count, 0);
2809 return x86_perf_event_set_period(event);
2812 static void intel_pmu_reset(void)
2814 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2815 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2816 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2817 int num_counters = hybrid(cpuc->pmu, num_counters);
2818 unsigned long flags;
2824 local_irq_save(flags);
2826 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2828 for (idx = 0; idx < num_counters; idx++) {
2829 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2830 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2832 for (idx = 0; idx < num_counters_fixed; idx++) {
2833 if (fixed_counter_disabled(idx, cpuc->pmu))
2835 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2839 ds->bts_index = ds->bts_buffer_base;
2841 /* Ack all overflows and disable fixed counters */
2842 if (x86_pmu.version >= 2) {
2843 intel_pmu_ack_status(intel_pmu_get_status());
2844 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2847 /* Reset LBRs and LBR freezing */
2848 if (x86_pmu.lbr_nr) {
2849 update_debugctlmsr(get_debugctlmsr() &
2850 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2853 local_irq_restore(flags);
2857 * We may be running with guest PEBS events created by KVM, and the
2858 * PEBS records are logged into the guest's DS and are invisible to the host.
2860 * In the case of guest PEBS overflow, we only trigger a fake event
2861 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
2862 * The guest will then vm-enter and check the guest DS area to read
2863 * the guest PEBS records.
2865 * The contents and other behavior of the guest event do not matter.
2867 static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
2868 struct perf_sample_data *data)
2870 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2871 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
2872 struct perf_event *event = NULL;
2875 if (!unlikely(perf_guest_state()))
2878 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
2882 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs,
2883 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {
2884 event = cpuc->events[bit];
2885 if (!event->attr.precise_ip)
2888 perf_sample_data_init(data, 0, event->hw.last_period);
2889 if (perf_event_overflow(event, data, regs))
2890 x86_pmu_stop(event, 0);
2892 /* Injecting one fake event is enough. */
2897 static int handle_pmi_common(struct pt_regs *regs, u64 status)
2899 struct perf_sample_data data;
2900 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2903 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2905 inc_irq_stat(apic_perf_irqs);
2908 * Ignore a range of extra bits in status that do not indicate
2909 * overflow by themselves.
2911 status &= ~(GLOBAL_STATUS_COND_CHG |
2912 GLOBAL_STATUS_ASIF |
2913 GLOBAL_STATUS_LBRS_FROZEN);
2917 * In case multiple PEBS events are sampled at the same time,
2918 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2919 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2920 * having their bits set in the status register. This is a sign
2921 * that there was at least one PEBS record pending at the time
2922 * of the PMU interrupt. PEBS counters must only be processed
2923 * via the drain_pebs() calls and not via the regular sample
2924 * processing loop that comes after, otherwise
2925 * phony regular samples may be generated in the sampling buffer
2926 * not marked with the EXACT tag. Another possibility is to have
2927 * one PEBS event and at least one non-PEBS event which overflows
2928 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2929 * not be set, yet the overflow status bit for the PEBS counter will
2932 * To avoid this problem, we systematically ignore the PEBS-enabled
2933 * counters from the GLOBAL_STATUS mask and we always process PEBS
2934 * events via drain_pebs().
2936 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
2939 * PEBS overflow sets bit 62 in the global status register
2941 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
2942 u64 pebs_enabled = cpuc->pebs_enabled;
2945 x86_pmu_handle_guest_pebs(regs, &data);
2946 x86_pmu.drain_pebs(regs, &data);
2947 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2950 * PMI throttle may be triggered, which stops the PEBS event.
2951 * Although cpuc->pebs_enabled is updated accordingly, the
2952 * MSR_IA32_PEBS_ENABLE is not updated, because
2953 * cpuc->enabled has been forced to 0 in the PMI.
2954 * Update the MSR if pebs_enabled has changed.
2956 if (pebs_enabled != cpuc->pebs_enabled)
2957 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
2963 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
2965 if (!perf_guest_handle_intel_pt_intr())
2966 intel_pt_interrupt();
2970 * Intel Perf metrics
2972 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
2974 if (x86_pmu.update_topdown_event)
2975 x86_pmu.update_topdown_event(NULL);
2979 * Checkpointed counters can lead to 'spurious' PMIs because the
2980 * rollback caused by the PMI will have cleared the overflow status
2981 * bit. Therefore always force probe these counters.
2983 status |= cpuc->intel_cp_status;
2985 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2986 struct perf_event *event = cpuc->events[bit];
2990 if (!test_bit(bit, cpuc->active_mask))
2993 if (!intel_pmu_save_and_restart(event))
2996 perf_sample_data_init(&data, 0, event->hw.last_period);
2998 if (has_branch_stack(event))
2999 data.br_stack = &cpuc->lbr_stack;
3001 if (perf_event_overflow(event, &data, regs))
3002 x86_pmu_stop(event, 0);
3009 * This handler is triggered by the local APIC, so the APIC IRQ handling
3012 static int intel_pmu_handle_irq(struct pt_regs *regs)
3014 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3015 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3016 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3023 * Save the PMU state.
3024 * It needs to be restored when leaving the handler.
3026 pmu_enabled = cpuc->enabled;
3028 * In general, the early ACK is only applied on old platforms.
3029 * For big cores starting from Haswell, the late ACK should be
3031 * For small cores after Tremont, we have to do the ACK right
3032 * before re-enabling counters, which is in the middle of the
3035 if (!late_ack && !mid_ack)
3036 apic_write(APIC_LVTPC, APIC_DM_NMI);
3037 intel_bts_disable_local();
3039 __intel_pmu_disable_all(true);
3040 handled = intel_pmu_drain_bts_buffer();
3041 handled += intel_bts_interrupt();
3042 status = intel_pmu_get_status();
3048 intel_pmu_lbr_read();
3049 intel_pmu_ack_status(status);
3050 if (++loops > 100) {
3054 WARN(1, "perfevents: irq loop stuck!\n");
3055 perf_event_print_debug();
3062 handled += handle_pmi_common(regs, status);
3065 * Repeat if there is more work to be done:
3067 status = intel_pmu_get_status();
3073 apic_write(APIC_LVTPC, APIC_DM_NMI);
3074 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3075 cpuc->enabled = pmu_enabled;
3077 __intel_pmu_enable_all(0, true);
3078 intel_bts_enable_local();
3081 * Only unmask the NMI after the overflow counters
3082 * have been reset. This avoids spurious NMIs on
3086 apic_write(APIC_LVTPC, APIC_DM_NMI);
3090 static struct event_constraint *
3091 intel_bts_constraints(struct perf_event *event)
3093 if (unlikely(intel_pmu_has_bts(event)))
3094 return &bts_constraint;
3100 * Note: matches a fake event, like Fixed2.
3102 static struct event_constraint *
3103 intel_vlbr_constraints(struct perf_event *event)
3105 struct event_constraint *c = &vlbr_constraint;
3107 if (unlikely(constraint_match(c, event->hw.config))) {
3108 event->hw.flags |= c->flags;
3115 static int intel_alt_er(struct cpu_hw_events *cpuc,
3116 int idx, u64 config)
3118 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3121 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3124 if (idx == EXTRA_REG_RSP_0)
3125 alt_idx = EXTRA_REG_RSP_1;
3127 if (idx == EXTRA_REG_RSP_1)
3128 alt_idx = EXTRA_REG_RSP_0;
3130 if (config & ~extra_regs[alt_idx].valid_mask)
3136 static void intel_fixup_er(struct perf_event *event, int idx)
3138 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3139 event->hw.extra_reg.idx = idx;
3141 if (idx == EXTRA_REG_RSP_0) {
3142 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3143 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3144 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3145 } else if (idx == EXTRA_REG_RSP_1) {
3146 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3147 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3148 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3153 * manage allocation of shared extra msr for certain events
3156 * per-cpu: to be shared between the various events on a single PMU
3157 * per-core: per-cpu + shared by HT threads
3159 static struct event_constraint *
3160 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3161 struct perf_event *event,
3162 struct hw_perf_event_extra *reg)
3164 struct event_constraint *c = &emptyconstraint;
3165 struct er_account *era;
3166 unsigned long flags;
3170 * reg->alloc can be set due to existing state, so for fake cpuc we
3171 * need to ignore this, otherwise we might fail to allocate proper fake
3172 * state for this extra reg constraint. Also see the comment below.
3174 if (reg->alloc && !cpuc->is_fake)
3175 return NULL; /* call x86_get_event_constraint() */
3178 era = &cpuc->shared_regs->regs[idx];
3180 * we use spin_lock_irqsave() to avoid lockdep issues when
3181 * passing a fake cpuc
3183 raw_spin_lock_irqsave(&era->lock, flags);
3185 if (!atomic_read(&era->ref) || era->config == reg->config) {
3188 * If it's a fake cpuc -- as per validate_{group,event}() we
3189 * shouldn't touch event state and we can avoid doing so
3190 * since both will only call get_event_constraints() once
3191 * on each event, this avoids the need for reg->alloc.
3193 * Not doing the ER fixup will only result in era->reg being
3194 * wrong, but since we won't actually try and program hardware
3195 * this isn't a problem either.
3197 if (!cpuc->is_fake) {
3198 if (idx != reg->idx)
3199 intel_fixup_er(event, idx);
3202 * x86_schedule_events() can call get_event_constraints()
3203 * multiple times on events in the case of incremental
3204 * scheduling. reg->alloc ensures we only do the ER
3210 /* lock in msr value */
3211 era->config = reg->config;
3212 era->reg = reg->reg;
3215 atomic_inc(&era->ref);
3218 * need to call x86_get_event_constraint()
3219 * to check if associated event has constraints
3223 idx = intel_alt_er(cpuc, idx, reg->config);
3224 if (idx != reg->idx) {
3225 raw_spin_unlock_irqrestore(&era->lock, flags);
3229 raw_spin_unlock_irqrestore(&era->lock, flags);
3235 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3236 struct hw_perf_event_extra *reg)
3238 struct er_account *era;
3241 * Only put the constraint if the extra reg was actually allocated. Also takes
3242 * care of events which do not use an extra shared reg.
3244 * Also, if this is a fake cpuc we shouldn't touch any event state
3245 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3246 * either since it'll be thrown out.
3248 if (!reg->alloc || cpuc->is_fake)
3251 era = &cpuc->shared_regs->regs[reg->idx];
3253 /* one fewer user */
3254 atomic_dec(&era->ref);
3256 /* allocate again next time */
3260 static struct event_constraint *
3261 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3262 struct perf_event *event)
3264 struct event_constraint *c = NULL, *d;
3265 struct hw_perf_event_extra *xreg, *breg;
3267 xreg = &event->hw.extra_reg;
3268 if (xreg->idx != EXTRA_REG_NONE) {
3269 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3270 if (c == &emptyconstraint)
3273 breg = &event->hw.branch_reg;
3274 if (breg->idx != EXTRA_REG_NONE) {
3275 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3276 if (d == &emptyconstraint) {
3277 __intel_shared_reg_put_constraints(cpuc, xreg);
3284 struct event_constraint *
3285 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3286 struct perf_event *event)
3288 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3289 struct event_constraint *c;
3291 if (event_constraints) {
3292 for_each_event_constraint(c, event_constraints) {
3293 if (constraint_match(c, event->hw.config)) {
3294 event->hw.flags |= c->flags;
3300 return &hybrid_var(cpuc->pmu, unconstrained);
3303 static struct event_constraint *
3304 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3305 struct perf_event *event)
3307 struct event_constraint *c;
3309 c = intel_vlbr_constraints(event);
3313 c = intel_bts_constraints(event);
3317 c = intel_shared_regs_constraints(cpuc, event);
3321 c = intel_pebs_constraints(event);
3325 return x86_get_event_constraints(cpuc, idx, event);
3329 intel_start_scheduling(struct cpu_hw_events *cpuc)
3331 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3332 struct intel_excl_states *xl;
3333 int tid = cpuc->excl_thread_id;
3336 * nothing needed if in group validation mode
3338 if (cpuc->is_fake || !is_ht_workaround_enabled())
3342 * no exclusion needed
3344 if (WARN_ON_ONCE(!excl_cntrs))
3347 xl = &excl_cntrs->states[tid];
3349 xl->sched_started = true;
3351 * lock shared state until we are done scheduling,
3352 * i.e. until intel_stop_scheduling();
3353 * this makes scheduling appear as a transaction
3355 raw_spin_lock(&excl_cntrs->lock);
3358 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3360 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3361 struct event_constraint *c = cpuc->event_constraint[idx];
3362 struct intel_excl_states *xl;
3363 int tid = cpuc->excl_thread_id;
3365 if (cpuc->is_fake || !is_ht_workaround_enabled())
3368 if (WARN_ON_ONCE(!excl_cntrs))
3371 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3374 xl = &excl_cntrs->states[tid];
3376 lockdep_assert_held(&excl_cntrs->lock);
3378 if (c->flags & PERF_X86_EVENT_EXCL)
3379 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3381 xl->state[cntr] = INTEL_EXCL_SHARED;
3385 intel_stop_scheduling(struct cpu_hw_events *cpuc)
3387 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3388 struct intel_excl_states *xl;
3389 int tid = cpuc->excl_thread_id;
3392 * nothing needed if in group validation mode
3394 if (cpuc->is_fake || !is_ht_workaround_enabled())
3397 * no exclusion needed
3399 if (WARN_ON_ONCE(!excl_cntrs))
3402 xl = &excl_cntrs->states[tid];
3404 xl->sched_started = false;
3406 * release shared state lock (acquired in intel_start_scheduling())
3408 raw_spin_unlock(&excl_cntrs->lock);
3411 static struct event_constraint *
3412 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3414 WARN_ON_ONCE(!cpuc->constraint_list);
3416 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3417 struct event_constraint *cx;
3420 * grab pre-allocated constraint entry
3422 cx = &cpuc->constraint_list[idx];
3425 * initialize dynamic constraint
3426 * with static constraint
3431 * mark constraint as dynamic
3433 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3440 static struct event_constraint *
3441 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3442 int idx, struct event_constraint *c)
3444 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3445 struct intel_excl_states *xlo;
3446 int tid = cpuc->excl_thread_id;
3450 * validating a group does not require
3451 * enforcing cross-thread exclusion
3453 if (cpuc->is_fake || !is_ht_workaround_enabled())
3457 * no exclusion needed
3459 if (WARN_ON_ONCE(!excl_cntrs))
3463 * because we modify the constraint, we need
3464 * to make a copy. Static constraints come
3465 * from static const tables.
3467 * only needed when constraint has not yet
3468 * been cloned (marked dynamic)
3470 c = dyn_constraint(cpuc, c, idx);
3473 * From here on, the constraint is dynamic.
3474 * Either it was just allocated above, or it
3475 * was allocated during an earlier invocation
3480 * state of sibling HT
3482 xlo = &excl_cntrs->states[tid ^ 1];
3485 * event requires exclusive counter access
3488 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3489 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3490 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3491 if (!cpuc->n_excl++)
3492 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3496 * Modify static constraint with current dynamic
3499 * EXCLUSIVE: sibling counter measuring exclusive event
3500 * SHARED : sibling counter measuring non-exclusive event
3501 * UNUSED : sibling counter unused
3504 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3506 * exclusive event in sibling counter
3507 * our corresponding counter cannot be used
3508 * regardless of our event
3510 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3511 __clear_bit(i, c->idxmsk);
3516 * if measuring an exclusive event, sibling
3517 * measuring non-exclusive, then counter cannot
3520 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3521 __clear_bit(i, c->idxmsk);
3528 * if we return an empty mask, then switch
3529 * back to static empty constraint to avoid
3530 * the cost of freeing later on
3533 c = &emptyconstraint;
3540 static struct event_constraint *
3541 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3542 struct perf_event *event)
3544 struct event_constraint *c1, *c2;
3546 c1 = cpuc->event_constraint[idx];
3550 * - static constraint: no change across incremental scheduling calls
3551 * - dynamic constraint: handled by intel_get_excl_constraints()
3553 c2 = __intel_get_event_constraints(cpuc, idx, event);
3555 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3556 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3557 c1->weight = c2->weight;
3561 if (cpuc->excl_cntrs)
3562 return intel_get_excl_constraints(cpuc, event, idx, c2);
3567 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3568 struct perf_event *event)
3570 struct hw_perf_event *hwc = &event->hw;
3571 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3572 int tid = cpuc->excl_thread_id;
3573 struct intel_excl_states *xl;
3576 * nothing needed if in group validation mode
3581 if (WARN_ON_ONCE(!excl_cntrs))
3584 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3585 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3586 if (!--cpuc->n_excl)
3587 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3591 * If event was actually assigned, then mark the counter state as
3594 if (hwc->idx >= 0) {
3595 xl = &excl_cntrs->states[tid];
3598 * put_constraint may be called from x86_schedule_events()
3599 * which already has the lock held so here make locking
3602 if (!xl->sched_started)
3603 raw_spin_lock(&excl_cntrs->lock);
3605 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3607 if (!xl->sched_started)
3608 raw_spin_unlock(&excl_cntrs->lock);
3613 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3614 struct perf_event *event)
3616 struct hw_perf_event_extra *reg;
3618 reg = &event->hw.extra_reg;
3619 if (reg->idx != EXTRA_REG_NONE)
3620 __intel_shared_reg_put_constraints(cpuc, reg);
3622 reg = &event->hw.branch_reg;
3623 if (reg->idx != EXTRA_REG_NONE)
3624 __intel_shared_reg_put_constraints(cpuc, reg);
3627 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3628 struct perf_event *event)
3630 intel_put_shared_regs_event_constraints(cpuc, event);
3633 * if the PMU has exclusive counter restrictions, then
3634 * all events are subject to them and must call the
3635 * put_excl_constraints() routine
3637 if (cpuc->excl_cntrs)
3638 intel_put_excl_constraints(cpuc, event);
3641 static void intel_pebs_aliases_core2(struct perf_event *event)
3643 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3645 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3646 * (0x003c) so that we can use it with PEBS.
3648 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3649 * PEBS capable. However we can use INST_RETIRED.ANY_P
3650 * (0x00c0), which is a PEBS capable event, to get the same
3653 * INST_RETIRED.ANY_P counts the number of cycles that retire
3654 * CNTMASK instructions. By setting CNTMASK to a value (16)
3655 * larger than the maximum number of instructions that can be
3656 * retired per cycle (4) and then inverting the condition, we
3657 * count all cycles that retire 16 or fewer instructions, which
3660 * Thereby we gain a PEBS capable cycle counter.
3662 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3664 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3665 event->hw.config = alt_config;
3669 static void intel_pebs_aliases_snb(struct perf_event *event)
3671 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3673 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3674 * (0x003c) so that we can use it with PEBS.
3676 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3677 * PEBS capable. However we can use UOPS_RETIRED.ALL
3678 * (0x01c2), which is a PEBS capable event, to get the same
3681 * UOPS_RETIRED.ALL counts the number of cycles that retire
3682 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3683 * larger than the maximum number of micro-ops that can be
3684 * retired per cycle (4) and then inverting the condition, we
3685 * count all cycles that retire 16 or fewer micro-ops, which
3688 * Thereby we gain a PEBS capable cycle counter.
3690 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3692 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3693 event->hw.config = alt_config;
3697 static void intel_pebs_aliases_precdist(struct perf_event *event)
3699 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3701 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3702 * (0x003c) so that we can use it with PEBS.
3704 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3705 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3706 * (0x01c0), which is a PEBS capable event, to get the same
3709 * The PREC_DIST event has special support to minimize sample
3710 * shadowing effects. One drawback is that it can only be
3711 * programmed on counter 1, but that seems like an
3712 * acceptable trade off.
3714 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3716 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3717 event->hw.config = alt_config;
3721 static void intel_pebs_aliases_ivb(struct perf_event *event)
3723 if (event->attr.precise_ip < 3)
3724 return intel_pebs_aliases_snb(event);
3725 return intel_pebs_aliases_precdist(event);
3728 static void intel_pebs_aliases_skl(struct perf_event *event)
3730 if (event->attr.precise_ip < 3)
3731 return intel_pebs_aliases_core2(event);
3732 return intel_pebs_aliases_precdist(event);
3735 static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3737 unsigned long flags = x86_pmu.large_pebs_flags;
3739 if (event->attr.use_clockid)
3740 flags &= ~PERF_SAMPLE_TIME;
3741 if (!event->attr.exclude_kernel)
3742 flags &= ~PERF_SAMPLE_REGS_USER;
3743 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3744 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3748 static int intel_pmu_bts_config(struct perf_event *event)
3750 struct perf_event_attr *attr = &event->attr;
3752 if (unlikely(intel_pmu_has_bts(event))) {
3753 /* BTS is not supported by this architecture. */
3754 if (!x86_pmu.bts_active)
3757 /* BTS is currently only allowed for user-mode. */
3758 if (!attr->exclude_kernel)
3761 /* BTS is not allowed for precise events. */
3762 if (attr->precise_ip)
3765 /* disallow bts if conflicting events are present */
3766 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3769 event->destroy = hw_perf_lbr_event_destroy;
3775 static int core_pmu_hw_config(struct perf_event *event)
3777 int ret = x86_pmu_hw_config(event);
3782 return intel_pmu_bts_config(event);
3785 #define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
3786 ((x86_pmu.num_topdown_events - 1) << 8))
3788 static bool is_available_metric_event(struct perf_event *event)
3790 return is_metric_event(event) &&
3791 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3794 static inline bool is_mem_loads_event(struct perf_event *event)
3796 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3799 static inline bool is_mem_loads_aux_event(struct perf_event *event)
3801 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3804 static inline bool require_mem_loads_aux_event(struct perf_event *event)
3806 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3810 return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3815 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3817 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3819 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3822 static int intel_pmu_hw_config(struct perf_event *event)
3824 int ret = x86_pmu_hw_config(event);
3829 ret = intel_pmu_bts_config(event);
3833 if (event->attr.precise_ip) {
3834 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3837 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3838 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3839 if (!(event->attr.sample_type &
3840 ~intel_pmu_large_pebs_flags(event))) {
3841 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3842 event->attach_state |= PERF_ATTACH_SCHED_CB;
3845 if (x86_pmu.pebs_aliases)
3846 x86_pmu.pebs_aliases(event);
3848 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3849 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3852 if (needs_branch_stack(event)) {
3853 ret = intel_pmu_setup_lbr_filter(event);
3856 event->attach_state |= PERF_ATTACH_SCHED_CB;
3859 * BTS is set up earlier in this path, so don't account twice
3861 if (!unlikely(intel_pmu_has_bts(event))) {
3862 /* disallow lbr if conflicting events are present */
3863 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3866 event->destroy = hw_perf_lbr_event_destroy;
3870 if (event->attr.aux_output) {
3871 if (!event->attr.precise_ip)
3874 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3877 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
3878 (event->attr.type == PERF_TYPE_HW_CACHE))
3882 * Config Topdown slots and metric events
3884 * The slots event on Fixed Counter 3 can support sampling,
3885 * which will be handled normally in x86_perf_event_update().
3887 * Metric events don't support sampling and require being paired
3888 * with a slots event as group leader. When the slots event
3889 * is used in a metrics group, it too cannot support sampling.
3891 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
3892 if (event->attr.config1 || event->attr.config2)
3896 * The TopDown metrics events and slots event don't
3897 * support any filters.
3899 if (event->attr.config & X86_ALL_EVENT_FLAGS)
3902 if (is_available_metric_event(event)) {
3903 struct perf_event *leader = event->group_leader;
3905 /* The metric events don't support sampling. */
3906 if (is_sampling_event(event))
3909 /* The metric events require a slots group leader. */
3910 if (!is_slots_event(leader))
3914 * The leader/SLOTS must not be a sampling event for
3915 * metric use; hardware requires it starts at 0 when used
3916 * in conjunction with MSR_PERF_METRICS.
3918 if (is_sampling_event(leader))
3921 event->event_caps |= PERF_EV_CAP_SIBLING;
3923 * Only once we have a METRICs sibling do we
3924 * need TopDown magic.
3926 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3927 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3932 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
3933 * doesn't function quite right. As a workaround it needs to always be
3934 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
3935 * The actual count of this second event is irrelevant; it just needs
3936 * to be active to make the first event function correctly.
3938 * In a group, the auxiliary event must come before the load latency
3939 * event. This rule simplifies the implementation of the check,
3940 * because perf does not see the complete group at this point.
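 *
 * Illustrative sketch (assumed grouping, not from the original source):
 * a valid group puts the auxiliary event X86_CONFIG(.event=0x03,
 * .umask=0x82) ahead of the load latency event X86_CONFIG(.event=0xcd,
 * .umask=0x01) sampling with PERF_SAMPLE_DATA_SRC; with the perf tool
 * this is roughly a group like
 * '{cpu/event=0x03,umask=0x82/,cpu/event=0xcd,umask=0x01,ldlat=3/}'
 * (exact tool syntax may vary).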
3942 if (require_mem_loads_aux_event(event) &&
3943 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
3944 is_mem_loads_event(event)) {
3945 struct perf_event *leader = event->group_leader;
3946 struct perf_event *sibling = NULL;
3948 if (!is_mem_loads_aux_event(leader)) {
3949 for_each_sibling_event(sibling, leader) {
3950 if (is_mem_loads_aux_event(sibling))
3953 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
3958 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3961 if (x86_pmu.version < 3)
3964 ret = perf_allow_cpu(&event->attr);
3968 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3974 * Currently, the only caller of this function is atomic_switch_perf_msrs().
3975 * The host perf context helps to prepare the values of the real hardware for
3976 * a set of MSRs that need to be switched atomically in a VMX transaction.
3978 * For example, the pseudocode needed to add a new msr should look like:
3980 * arr[(*nr)++] = (struct perf_guest_switch_msr){
3981 * .msr = the hardware msr address,
3982 * .host = the value the hardware has when it doesn't run a guest,
3983 * .guest = the value the hardware has when it runs a guest,
3986 * These values have nothing to do with the emulated values the guest sees
3987 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
3988 * specifically in the intel_pmu_{get,set}_msr().
3990 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
3992 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3993 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3994 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
3995 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
3996 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
3997 int global_ctrl, pebs_enable;
4000 global_ctrl = (*nr)++;
4001 arr[global_ctrl] = (struct perf_guest_switch_msr){
4002 .msr = MSR_CORE_PERF_GLOBAL_CTRL,
4003 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4004 .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
4011 * If a PMU counter has PEBS enabled, it is not enough to
4012 * disable the counter on guest entry, since a PEBS memory
4013 * write can overshoot guest entry and corrupt guest
4014 * memory. Disabling PEBS solves the problem.
4016 * Don't do this if the CPU already enforces it.
4018 if (x86_pmu.pebs_no_isolation) {
4019 arr[(*nr)++] = (struct perf_guest_switch_msr){
4020 .msr = MSR_IA32_PEBS_ENABLE,
4021 .host = cpuc->pebs_enabled,
4027 if (!kvm_pmu || !x86_pmu.pebs_ept)
4030 arr[(*nr)++] = (struct perf_guest_switch_msr){
4031 .msr = MSR_IA32_DS_AREA,
4032 .host = (unsigned long)cpuc->ds,
4033 .guest = kvm_pmu->ds_area,
4036 if (x86_pmu.intel_cap.pebs_baseline) {
4037 arr[(*nr)++] = (struct perf_guest_switch_msr){
4038 .msr = MSR_PEBS_DATA_CFG,
4039 .host = cpuc->pebs_data_cfg,
4040 .guest = kvm_pmu->pebs_data_cfg,
4044 pebs_enable = (*nr)++;
4045 arr[pebs_enable] = (struct perf_guest_switch_msr){
4046 .msr = MSR_IA32_PEBS_ENABLE,
4047 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4048 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
4051 if (arr[pebs_enable].host) {
4052 /* Disable guest PEBS if host PEBS is enabled. */
4053 arr[pebs_enable].guest = 0;
4055 /* Disable guest PEBS for cross-mapped PEBS counters. */
4056 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4057 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4058 arr[global_ctrl].guest |= arr[pebs_enable].guest;
4064 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4066 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4067 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4070 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4071 struct perf_event *event = cpuc->events[idx];
4073 arr[idx].msr = x86_pmu_config_addr(idx);
4074 arr[idx].host = arr[idx].guest = 0;
4076 if (!test_bit(idx, cpuc->active_mask))
4079 arr[idx].host = arr[idx].guest =
4080 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4082 if (event->attr.exclude_host)
4083 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4084 else if (event->attr.exclude_guest)
4085 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4088 *nr = x86_pmu.num_counters;
4092 static void core_pmu_enable_event(struct perf_event *event)
4094 if (!event->attr.exclude_host)
4095 x86_pmu_enable_event(event);
4098 static void core_pmu_enable_all(int added)
4100 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4103 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4104 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4106 if (!test_bit(idx, cpuc->active_mask) ||
4107 cpuc->events[idx]->attr.exclude_host)
4110 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4114 static int hsw_hw_config(struct perf_event *event)
4116 int ret = intel_pmu_hw_config(event);
4120 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4122 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4125 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4126 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4129 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4130 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4131 event->attr.precise_ip > 0))
4134 if (event_is_checkpointed(event)) {
4136 * Sampling of checkpointed events can cause situations where
4137 * the CPU constantly aborts because of an overflow, which is
4138 * then checkpointed back and ignored. Forbid checkpointing
4141 * But still allow a long sampling period, so that perf stat
4144 if (event->attr.sample_period > 0 &&
4145 event->attr.sample_period < 0x7fffffff)
4151 static struct event_constraint counter0_constraint =
4152 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4154 static struct event_constraint counter2_constraint =
4155 EVENT_CONSTRAINT(0, 0x4, 0);
4157 static struct event_constraint fixed0_constraint =
4158 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4160 static struct event_constraint fixed0_counter0_constraint =
4161 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4163 static struct event_constraint *
4164 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4165 struct perf_event *event)
4167 struct event_constraint *c;
4169 c = intel_get_event_constraints(cpuc, idx, event);
4171 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4172 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4173 if (c->idxmsk64 & (1U << 2))
4174 return &counter2_constraint;
4175 return &emptyconstraint;
4181 static struct event_constraint *
4182 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4183 struct perf_event *event)
4186 * Fixed counter 0 has less skid.
4187 * Force instruction:ppp in Fixed counter 0
4189 if ((event->attr.precise_ip == 3) &&
4190 constraint_match(&fixed0_constraint, event->hw.config))
4191 return &fixed0_constraint;
4193 return hsw_get_event_constraints(cpuc, idx, event);
4196 static struct event_constraint *
4197 spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4198 struct perf_event *event)
4200 struct event_constraint *c;
4202 c = icl_get_event_constraints(cpuc, idx, event);
4205 * The :ppp indicates the Precise Distribution (PDist) facility, which
4206 * is only supported on GP counter 0. If a :ppp event is not
4207 * available on GP counter 0, error out.
4208 * Exception: Instruction PDIR is only available on the fixed counter 0.
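 *
 * Illustrative example (not from the original source): a cycles:ppp
 * event (precise_ip == 3) does not match fixed0_constraint and is
 * forced onto GP counter 0 below, while instructions:ppp matches
 * fixed0_constraint in icl_get_event_constraints() and stays on the
 * fixed counter 0.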
4210 if ((event->attr.precise_ip == 3) &&
4211 !constraint_match(&fixed0_constraint, event->hw.config)) {
4212 if (c->idxmsk64 & BIT_ULL(0))
4213 return &counter0_constraint;
4215 return &emptyconstraint;
4221 static struct event_constraint *
4222 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4223 struct perf_event *event)
4225 struct event_constraint *c;
4227 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4228 if (event->attr.precise_ip == 3)
4229 return &counter0_constraint;
4231 c = intel_get_event_constraints(cpuc, idx, event);
4236 static struct event_constraint *
4237 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4238 struct perf_event *event)
4240 struct event_constraint *c;
4243 * :ppp means to do reduced skid PEBS,
4244 * which is available on PMC0 and fixed counter 0.
4246 if (event->attr.precise_ip == 3) {
4247 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4248 if (constraint_match(&fixed0_constraint, event->hw.config))
4249 return &fixed0_counter0_constraint;
4251 return &counter0_constraint;
4254 c = intel_get_event_constraints(cpuc, idx, event);
4259 static bool allow_tsx_force_abort = true;
4261 static struct event_constraint *
4262 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4263 struct perf_event *event)
4265 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4268 * Without TFA we must not use PMC3.
4270 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4271 c = dyn_constraint(cpuc, c, idx);
4272 c->idxmsk64 &= ~(1ULL << 3);
4279 static struct event_constraint *
4280 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4281 struct perf_event *event)
4283 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4285 if (pmu->cpu_type == hybrid_big)
4286 return spr_get_event_constraints(cpuc, idx, event);
4287 else if (pmu->cpu_type == hybrid_small)
4288 return tnt_get_event_constraints(cpuc, idx, event);
4291 return &emptyconstraint;
4294 static int adl_hw_config(struct perf_event *event)
4296 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4298 if (pmu->cpu_type == hybrid_big)
4299 return hsw_hw_config(event);
4300 else if (pmu->cpu_type == hybrid_small)
4301 return intel_pmu_hw_config(event);
4307 static u8 adl_get_hybrid_cpu_type(void)
4315 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4316 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4317 * the two to enforce a minimum period of 128 (the smallest value that has bits
4318 * 0-5 cleared and >= 100).
4320 * Because of how the code in x86_perf_event_set_period() works, the truncation
4321 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4322 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4324 * Therefore the effective (average) period matches the requested period,
4325 * despite coarser hardware granularity.
4327 static u64 bdw_limit_period(struct perf_event *event, u64 left)
4329 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4330 X86_CONFIG(.event=0xc0, .umask=0x01)) {
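/*
 * Worked example for the rule above: a requested period of 100 becomes
 * 128 (the 128 minimum already has bits 0-5 clear); a requested period
 * of 200 is truncated to 200 & ~0x3f = 192, and the 8 "lost" events are
 * carried in period_left so that a longer period is occasionally
 * programmed and the average stays at 200.
 */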
4338 static u64 nhm_limit_period(struct perf_event *event, u64 left)
4340 return max(left, 32ULL);
4343 static u64 spr_limit_period(struct perf_event *event, u64 left)
4345 if (event->attr.precise_ip == 3)
4346 return max(left, 128ULL);
4351 PMU_FORMAT_ATTR(event, "config:0-7" );
4352 PMU_FORMAT_ATTR(umask, "config:8-15" );
4353 PMU_FORMAT_ATTR(edge, "config:18" );
4354 PMU_FORMAT_ATTR(pc, "config:19" );
4355 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4356 PMU_FORMAT_ATTR(inv, "config:23" );
4357 PMU_FORMAT_ATTR(cmask, "config:24-31" );
4358 PMU_FORMAT_ATTR(in_tx, "config:32");
4359 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
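/*
 * Usage note: these format fields let raw events be composed from sysfs,
 * e.g. "perf stat -e cpu/event=0x3c,in_tx=1,in_tx_cp=1/" sets
 * config[7:0] = 0x3c plus config bits 32 and 33, the same encoding the
 * "cycles-ct" alias below expands to.
 */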
4361 static struct attribute *intel_arch_formats_attr[] = {
4362 &format_attr_event.attr,
4363 &format_attr_umask.attr,
4364 &format_attr_edge.attr,
4365 &format_attr_pc.attr,
4366 &format_attr_inv.attr,
4367 &format_attr_cmask.attr,
4371 ssize_t intel_event_sysfs_show(char *page, u64 config)
4373 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4375 return x86_event_sysfs_show(page, config, event);
4378 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4380 struct intel_shared_regs *regs;
4383 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4384 GFP_KERNEL, cpu_to_node(cpu));
4387 * initialize the locks to keep lockdep happy
4389 for (i = 0; i < EXTRA_REG_MAX; i++)
4390 raw_spin_lock_init(&regs->regs[i].lock);
4397 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4399 struct intel_excl_cntrs *c;
4401 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4402 GFP_KERNEL, cpu_to_node(cpu));
4404 raw_spin_lock_init(&c->lock);
4411 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4413 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4415 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4416 cpuc->shared_regs = allocate_shared_regs(cpu);
4417 if (!cpuc->shared_regs)
4421 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4422 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4424 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4425 if (!cpuc->constraint_list)
4426 goto err_shared_regs;
4429 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4430 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4431 if (!cpuc->excl_cntrs)
4432 goto err_constraint_list;
4434 cpuc->excl_thread_id = 0;
4439 err_constraint_list:
4440 kfree(cpuc->constraint_list);
4441 cpuc->constraint_list = NULL;
4444 kfree(cpuc->shared_regs);
4445 cpuc->shared_regs = NULL;
4451 static int intel_pmu_cpu_prepare(int cpu)
4453 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4456 static void flip_smm_bit(void *data)
4458 unsigned long set = *(unsigned long *)data;
4461 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4462 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4464 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4465 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4469 static bool init_hybrid_pmu(int cpu)
4471 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4472 u8 cpu_type = get_this_hybrid_cpu_type();
4473 struct x86_hybrid_pmu *pmu = NULL;
4476 if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4477 cpu_type = x86_pmu.get_hybrid_cpu_type();
4479 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4480 if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4481 pmu = &x86_pmu.hybrid_pmu[i];
4485 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4490 /* Only check and dump the PMU information for the first CPU */
4491 if (!cpumask_empty(&pmu->supported_cpus))
4494 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4497 pr_info("%s PMU driver: ", pmu->name);
4499 if (pmu->intel_cap.pebs_output_pt_available)
4500 pr_cont("PEBS-via-PT ");
4504 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4508 cpumask_set_cpu(cpu, &pmu->supported_cpus);
4509 cpuc->pmu = &pmu->pmu;
4511 x86_pmu_update_cpu_context(&pmu->pmu, cpu);
4516 static void intel_pmu_cpu_starting(int cpu)
4518 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4519 int core_id = topology_core_id(cpu);
4522 if (is_hybrid() && !init_hybrid_pmu(cpu))
4525 init_debug_store_on_cpu(cpu);
4527 * Deal with CPUs that don't clear their LBRs on power-up.
4529 intel_pmu_lbr_reset();
4531 cpuc->lbr_sel = NULL;
4533 if (x86_pmu.flags & PMU_FL_TFA) {
4534 WARN_ON_ONCE(cpuc->tfa_shadow);
4535 cpuc->tfa_shadow = ~0ULL;
4536 intel_set_tfa(cpuc, false);
4539 if (x86_pmu.version > 1)
4540 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4543 * Disable perf metrics if any added CPU doesn't support it.
4545 * Turn off the check for a hybrid architecture, because the
4546 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
4547 * the architectural features. Perf metrics is a model-specific
4548 * feature for now. The corresponding bit should always be 0 on
4549 * a hybrid platform, e.g., Alder Lake.
4551 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4552 union perf_capabilities perf_cap;
4554 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4555 if (!perf_cap.perf_metrics) {
4556 x86_pmu.intel_cap.perf_metrics = 0;
4557 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4561 if (!cpuc->shared_regs)
4564 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4565 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4566 struct intel_shared_regs *pc;
4568 pc = per_cpu(cpu_hw_events, i).shared_regs;
4569 if (pc && pc->core_id == core_id) {
4570 cpuc->kfree_on_online[0] = cpuc->shared_regs;
4571 cpuc->shared_regs = pc;
4575 cpuc->shared_regs->core_id = core_id;
4576 cpuc->shared_regs->refcnt++;
4579 if (x86_pmu.lbr_sel_map)
4580 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4582 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4583 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4584 struct cpu_hw_events *sibling;
4585 struct intel_excl_cntrs *c;
4587 sibling = &per_cpu(cpu_hw_events, i);
4588 c = sibling->excl_cntrs;
4589 if (c && c->core_id == core_id) {
4590 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4591 cpuc->excl_cntrs = c;
4592 if (!sibling->excl_thread_id)
4593 cpuc->excl_thread_id = 1;
4597 cpuc->excl_cntrs->core_id = core_id;
4598 cpuc->excl_cntrs->refcnt++;
4602 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4604 struct intel_excl_cntrs *c;
4606 c = cpuc->excl_cntrs;
4608 if (c->core_id == -1 || --c->refcnt == 0)
4610 cpuc->excl_cntrs = NULL;
4613 kfree(cpuc->constraint_list);
4614 cpuc->constraint_list = NULL;
4617 static void intel_pmu_cpu_dying(int cpu)
4619 fini_debug_store_on_cpu(cpu);
4622 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4624 struct intel_shared_regs *pc;
4626 pc = cpuc->shared_regs;
4628 if (pc->core_id == -1 || --pc->refcnt == 0)
4630 cpuc->shared_regs = NULL;
4633 free_excl_cntrs(cpuc);
4636 static void intel_pmu_cpu_dead(int cpu)
4638 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4640 intel_cpuc_finish(cpuc);
4642 if (is_hybrid() && cpuc->pmu)
4643 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
4646 static void intel_pmu_sched_task(struct perf_event_context *ctx,
4649 intel_pmu_pebs_sched_task(ctx, sched_in);
4650 intel_pmu_lbr_sched_task(ctx, sched_in);
4653 static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
4654 struct perf_event_context *next)
4656 intel_pmu_lbr_swap_task_ctx(prev, next);
4659 static int intel_pmu_check_period(struct perf_event *event, u64 value)
4661 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4664 static void intel_aux_output_init(void)
4666 /* Refer also intel_pmu_aux_output_match() */
4667 if (x86_pmu.intel_cap.pebs_output_pt_available)
4668 x86_pmu.assign = intel_pmu_assign_event;
4671 static int intel_pmu_aux_output_match(struct perf_event *event)
4673 /* intel_pmu_assign_event() is needed, refer intel_aux_output_init() */
4674 if (!x86_pmu.intel_cap.pebs_output_pt_available)
4677 return is_intel_pt_event(event);
4680 static int intel_pmu_filter_match(struct perf_event *event)
4682 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4683 unsigned int cpu = smp_processor_id();
4685 return cpumask_test_cpu(cpu, &pmu->supported_cpus);
4688 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
4690 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
4692 PMU_FORMAT_ATTR(frontend, "config1:0-23");
4694 static struct attribute *intel_arch3_formats_attr[] = {
4695 &format_attr_event.attr,
4696 &format_attr_umask.attr,
4697 &format_attr_edge.attr,
4698 &format_attr_pc.attr,
4699 &format_attr_any.attr,
4700 &format_attr_inv.attr,
4701 &format_attr_cmask.attr,
4705 static struct attribute *hsw_format_attr[] = {
4706 &format_attr_in_tx.attr,
4707 &format_attr_in_tx_cp.attr,
4708 &format_attr_offcore_rsp.attr,
4709 &format_attr_ldlat.attr,
4713 static struct attribute *nhm_format_attr[] = {
4714 &format_attr_offcore_rsp.attr,
4715 &format_attr_ldlat.attr,
4719 static struct attribute *slm_format_attr[] = {
4720 &format_attr_offcore_rsp.attr,
4724 static struct attribute *skl_format_attr[] = {
4725 &format_attr_frontend.attr,
4729 static __initconst const struct x86_pmu core_pmu = {
4731 .handle_irq = x86_pmu_handle_irq,
4732 .disable_all = x86_pmu_disable_all,
4733 .enable_all = core_pmu_enable_all,
4734 .enable = core_pmu_enable_event,
4735 .disable = x86_pmu_disable_event,
4736 .hw_config = core_pmu_hw_config,
4737 .schedule_events = x86_schedule_events,
4738 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4739 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4740 .event_map = intel_pmu_event_map,
4741 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4743 .large_pebs_flags = LARGE_PEBS_FLAGS,
4746 * Intel PMCs cannot be accessed sanely above 32-bit width,
4747 * so we install an artificial 1<<31 period regardless of
4748 * the generic event period:
4750 .max_period = (1ULL<<31) - 1,
4751 .get_event_constraints = intel_get_event_constraints,
4752 .put_event_constraints = intel_put_event_constraints,
4753 .event_constraints = intel_core_event_constraints,
4754 .guest_get_msrs = core_guest_get_msrs,
4755 .format_attrs = intel_arch_formats_attr,
4756 .events_sysfs_show = intel_event_sysfs_show,
4759 * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
4760 * together with PMU version 1 and thus be using core_pmu with
4761 * shared_regs. We need the following callbacks here to allocate it properly.
4764 .cpu_prepare = intel_pmu_cpu_prepare,
4765 .cpu_starting = intel_pmu_cpu_starting,
4766 .cpu_dying = intel_pmu_cpu_dying,
4767 .cpu_dead = intel_pmu_cpu_dead,
4769 .check_period = intel_pmu_check_period,
4771 .lbr_reset = intel_pmu_lbr_reset_64,
4772 .lbr_read = intel_pmu_lbr_read_64,
4773 .lbr_save = intel_pmu_lbr_save,
4774 .lbr_restore = intel_pmu_lbr_restore,
4777 static __initconst const struct x86_pmu intel_pmu = {
4779 .handle_irq = intel_pmu_handle_irq,
4780 .disable_all = intel_pmu_disable_all,
4781 .enable_all = intel_pmu_enable_all,
4782 .enable = intel_pmu_enable_event,
4783 .disable = intel_pmu_disable_event,
4784 .add = intel_pmu_add_event,
4785 .del = intel_pmu_del_event,
4786 .read = intel_pmu_read_event,
4787 .hw_config = intel_pmu_hw_config,
4788 .schedule_events = x86_schedule_events,
4789 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4790 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4791 .event_map = intel_pmu_event_map,
4792 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4794 .large_pebs_flags = LARGE_PEBS_FLAGS,
4796 * Intel PMCs cannot be accessed sanely above 32 bit width,
4797 * so we install an artificial 1<<31 period regardless of
4798 * the generic event period:
4800 .max_period = (1ULL << 31) - 1,
4801 .get_event_constraints = intel_get_event_constraints,
4802 .put_event_constraints = intel_put_event_constraints,
4803 .pebs_aliases = intel_pebs_aliases_core2,
4805 .format_attrs = intel_arch3_formats_attr,
4806 .events_sysfs_show = intel_event_sysfs_show,
4808 .cpu_prepare = intel_pmu_cpu_prepare,
4809 .cpu_starting = intel_pmu_cpu_starting,
4810 .cpu_dying = intel_pmu_cpu_dying,
4811 .cpu_dead = intel_pmu_cpu_dead,
4813 .guest_get_msrs = intel_guest_get_msrs,
4814 .sched_task = intel_pmu_sched_task,
4815 .swap_task_ctx = intel_pmu_swap_task_ctx,
4817 .check_period = intel_pmu_check_period,
4819 .aux_output_match = intel_pmu_aux_output_match,
4821 .lbr_reset = intel_pmu_lbr_reset_64,
4822 .lbr_read = intel_pmu_lbr_read_64,
4823 .lbr_save = intel_pmu_lbr_save,
4824 .lbr_restore = intel_pmu_lbr_restore,
4827 * SMM has access to all 4 rings and while traditionally SMM code only
4828 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
4830 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
4831 * between SMM or not, this results in what should be pure userspace
4832 * counters including SMM data.
4834 * This is a clear privilege issue, therefore globally disable
4835 * counting SMM by default.
4837 .attr_freeze_on_smi = 1,
4840 static __init void intel_clovertown_quirk(void)
4843 * PEBS is unreliable due to:
4845 * AJ67 - PEBS may experience CPL leaks
4846 * AJ68 - PEBS PMI may be delayed by one event
4847 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
4848 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
4850 * AJ67 could be worked around by restricting the OS/USR flags.
4851 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
4853 * AJ106 could possibly be worked around by not allowing LBR
4854 * usage from PEBS, including the fixup.
4855 * AJ68 could possibly be worked around by always programming
4856 * a pebs_event_reset[0] value and coping with the lost events.
4858 * But taken together it might just make sense to not enable PEBS on these chips.
4861 pr_warn("PEBS disabled due to CPU errata\n");
4863 x86_pmu.pebs_constraints = NULL;
4866 static const struct x86_cpu_desc isolation_ucodes[] = {
4867 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
4868 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
4869 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
4870 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
4871 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
4872 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
4873 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
4874 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
4875 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
4876 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
4877 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
4878 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
4879 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
4880 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
4881 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
4882 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
4883 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
4884 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
4885 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
4886 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
4887 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
4888 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
4889 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
4890 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
4891 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
4892 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
4893 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
4894 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
4898 static void intel_check_pebs_isolation(void)
4900 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4903 static __init void intel_pebs_isolation_quirk(void)
4905 WARN_ON_ONCE(x86_pmu.check_microcode);
4906 x86_pmu.check_microcode = intel_check_pebs_isolation;
4907 intel_check_pebs_isolation();
4910 static const struct x86_cpu_desc pebs_ucodes[] = {
4911 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
4912 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
4913 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
4917 static bool intel_snb_pebs_broken(void)
4919 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4922 static void intel_snb_check_microcode(void)
4924 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4928 * Serialized by the microcode lock.
4930 if (x86_pmu.pebs_broken) {
4931 pr_info("PEBS enabled due to microcode update\n");
4932 x86_pmu.pebs_broken = 0;
4934 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4935 x86_pmu.pebs_broken = 1;
4939 static bool is_lbr_from(unsigned long msr)
4941 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4943 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4947 * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
4948 * This function tests whether the input MSR can be safely accessed.
4950 static bool check_msr(unsigned long msr, u64 mask)
4952 u64 val_old, val_new, val_tmp;
4955 * Disable the check for real HW, so we don't
4956 * mess with potentially enabled registers:
4958 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4962 * Read the current value, change it and read it back to see if it
4963 * matches, this is needed to detect certain hardware emulators
4964 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
4966 if (rdmsrl_safe(msr, &val_old))
4970 * Only change the bits which can be updated by wrmsrl.
4972 val_tmp = val_old ^ mask;
4974 if (is_lbr_from(msr))
4975 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4977 if (wrmsrl_safe(msr, val_tmp) ||
4978 rdmsrl_safe(msr, &val_new))
4982 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4983 * should equal rdmsrl()'s even with the quirk.
4985 if (val_new != val_tmp)
4988 if (is_lbr_from(msr))
4989 val_old = lbr_from_signext_quirk_wr(val_old);
4991 /* At this point the MSR is known to be safely accessible.
4992 * Restore the old value and return.
4994 wrmsrl(msr, val_old);
4999 static __init void intel_sandybridge_quirk(void)
5001 x86_pmu.check_microcode = intel_snb_check_microcode;
5003 intel_snb_check_microcode();
5007 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5008 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5009 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5010 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5011 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5012 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5013 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5014 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5017 static __init void intel_arch_events_quirk(void)
5021 /* disable events that CPUID reports as not present */
5022 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5023 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5024 pr_warn("CPUID marked event: \'%s\' unavailable\n",
5025 intel_arch_events_map[bit].name);
5029 static __init void intel_nehalem_quirk(void)
5031 union cpuid10_ebx ebx;
5033 ebx.full = x86_pmu.events_maskl;
5034 if (ebx.split.no_branch_misses_retired) {
5036 * Erratum AAJ80 detected, we work it around by using
5037 * the BR_MISP_EXEC.ANY event. This will over-count
5038 * branch-misses, but it's still much better than the
5039 * architectural event which is often completely bogus:
5041 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5042 ebx.split.no_branch_misses_retired = 0;
5043 x86_pmu.events_maskl = ebx.full;
5044 pr_info("CPU erratum AAJ80 worked around\n");
5049 * enable software workaround for errata:
5054 * Only needed when HT is enabled. However, detecting
5055 * whether HT is enabled is difficult (model specific). So instead,
5056 * we enable the workaround at early boot, and verify whether
5057 * it is needed in a later initcall phase once we have valid
5058 * topology information to check if HT is actually enabled.
5060 static __init void intel_ht_bug(void)
5062 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5064 x86_pmu.start_scheduling = intel_start_scheduling;
5065 x86_pmu.commit_scheduling = intel_commit_scheduling;
5066 x86_pmu.stop_scheduling = intel_stop_scheduling;
5069 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
5070 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
5072 /* Haswell special events */
5073 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
5074 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
5075 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
5076 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
5077 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
5078 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
5079 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
5080 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
5081 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
5082 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
5083 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
5084 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
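/*
 * Usage note: the aliases above let TSX activity be measured by name,
 * e.g. "perf stat -e tx-start,tx-commit,tx-abort,cycles-ct", without raw
 * encodings; tsx_is_visible() below hides them when the CPU lacks RTM.
 */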
5086 static struct attribute *hsw_events_attrs[] = {
5087 EVENT_PTR(td_slots_issued),
5088 EVENT_PTR(td_slots_retired),
5089 EVENT_PTR(td_fetch_bubbles),
5090 EVENT_PTR(td_total_slots),
5091 EVENT_PTR(td_total_slots_scale),
5092 EVENT_PTR(td_recovery_bubbles),
5093 EVENT_PTR(td_recovery_bubbles_scale),
5097 static struct attribute *hsw_mem_events_attrs[] = {
5098 EVENT_PTR(mem_ld_hsw),
5099 EVENT_PTR(mem_st_hsw),
5103 static struct attribute *hsw_tsx_events_attrs[] = {
5104 EVENT_PTR(tx_start),
5105 EVENT_PTR(tx_commit),
5106 EVENT_PTR(tx_abort),
5107 EVENT_PTR(tx_capacity),
5108 EVENT_PTR(tx_conflict),
5109 EVENT_PTR(el_start),
5110 EVENT_PTR(el_commit),
5111 EVENT_PTR(el_abort),
5112 EVENT_PTR(el_capacity),
5113 EVENT_PTR(el_conflict),
5114 EVENT_PTR(cycles_t),
5115 EVENT_PTR(cycles_ct),
5119 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
5120 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5121 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
5122 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5124 static struct attribute *icl_events_attrs[] = {
5125 EVENT_PTR(mem_ld_hsw),
5126 EVENT_PTR(mem_st_hsw),
5130 static struct attribute *icl_td_events_attrs[] = {
5132 EVENT_PTR(td_retiring),
5133 EVENT_PTR(td_bad_spec),
5134 EVENT_PTR(td_fe_bound),
5135 EVENT_PTR(td_be_bound),
5139 static struct attribute *icl_tsx_events_attrs[] = {
5140 EVENT_PTR(tx_start),
5141 EVENT_PTR(tx_abort),
5142 EVENT_PTR(tx_commit),
5143 EVENT_PTR(tx_capacity_read),
5144 EVENT_PTR(tx_capacity_write),
5145 EVENT_PTR(tx_conflict),
5146 EVENT_PTR(el_start),
5147 EVENT_PTR(el_abort),
5148 EVENT_PTR(el_commit),
5149 EVENT_PTR(el_capacity_read),
5150 EVENT_PTR(el_capacity_write),
5151 EVENT_PTR(el_conflict),
5152 EVENT_PTR(cycles_t),
5153 EVENT_PTR(cycles_ct),
5158 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5159 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5161 static struct attribute *spr_events_attrs[] = {
5162 EVENT_PTR(mem_ld_hsw),
5163 EVENT_PTR(mem_st_spr),
5164 EVENT_PTR(mem_ld_aux),
5168 static struct attribute *spr_td_events_attrs[] = {
5170 EVENT_PTR(td_retiring),
5171 EVENT_PTR(td_bad_spec),
5172 EVENT_PTR(td_fe_bound),
5173 EVENT_PTR(td_be_bound),
5174 EVENT_PTR(td_heavy_ops),
5175 EVENT_PTR(td_br_mispredict),
5176 EVENT_PTR(td_fetch_lat),
5177 EVENT_PTR(td_mem_bound),
5181 static struct attribute *spr_tsx_events_attrs[] = {
5182 EVENT_PTR(tx_start),
5183 EVENT_PTR(tx_abort),
5184 EVENT_PTR(tx_commit),
5185 EVENT_PTR(tx_capacity_read),
5186 EVENT_PTR(tx_capacity_write),
5187 EVENT_PTR(tx_conflict),
5188 EVENT_PTR(cycles_t),
5189 EVENT_PTR(cycles_ct),
5193 static ssize_t freeze_on_smi_show(struct device *cdev,
5194 struct device_attribute *attr,
5197 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5200 static DEFINE_MUTEX(freeze_on_smi_mutex);
5202 static ssize_t freeze_on_smi_store(struct device *cdev,
5203 struct device_attribute *attr,
5204 const char *buf, size_t count)
5209 ret = kstrtoul(buf, 0, &val);
5216 mutex_lock(&freeze_on_smi_mutex);
5218 if (x86_pmu.attr_freeze_on_smi == val)
5221 x86_pmu.attr_freeze_on_smi = val;
5224 on_each_cpu(flip_smm_bit, &val, 1);
5227 mutex_unlock(&freeze_on_smi_mutex);
5232 static void update_tfa_sched(void *ignored)
5234 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5237 * check if PMC3 is used
5238 * and if so, force schedule-out for all event types in all contexts
5240 if (test_bit(3, cpuc->active_mask))
5241 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5244 static ssize_t show_sysctl_tfa(struct device *cdev,
5245 struct device_attribute *attr,
5248 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5251 static ssize_t set_sysctl_tfa(struct device *cdev,
5252 struct device_attribute *attr,
5253 const char *buf, size_t count)
5258 ret = kstrtobool(buf, &val);
5263 if (val == allow_tsx_force_abort)
5266 allow_tsx_force_abort = val;
5269 on_each_cpu(update_tfa_sched, NULL, 1);
5276 static DEVICE_ATTR_RW(freeze_on_smi);
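/*
 * Usage note (sysfs path assumed to be the core PMU's, e.g.
 * /sys/devices/cpu/freeze_on_smi): writing 1 sets DEBUGCTLMSR_FREEZE_IN_SMM
 * on every CPU via flip_smm_bit(), so the counters stop counting while
 * firmware runs in SMM and SMI time is not charged to the interrupted
 * workload.
 */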
5278 static ssize_t branches_show(struct device *cdev,
5279 struct device_attribute *attr,
5282 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5285 static DEVICE_ATTR_RO(branches);
5287 static struct attribute *lbr_attrs[] = {
5288 &dev_attr_branches.attr,
5292 static char pmu_name_str[30];
5294 static ssize_t pmu_name_show(struct device *cdev,
5295 struct device_attribute *attr,
5298 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
5301 static DEVICE_ATTR_RO(pmu_name);
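/*
 * Usage note (paths assumed): these attributes are exposed under the
 * PMU's caps directory, e.g. "cat /sys/devices/cpu/caps/pmu_name" prints
 * the microarchitecture string chosen in intel_pmu_init(), and
 * "caps/branches" reports the LBR stack depth (x86_pmu.lbr_nr).
 */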
5303 static struct attribute *intel_pmu_caps_attrs[] = {
5304 &dev_attr_pmu_name.attr,
5308 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
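/*
 * Usage note (path assumed): on parts with the TSX force-abort erratum,
 * "echo 0 > /sys/devices/cpu/allow_tsx_force_abort" keeps perf off PMC3
 * (see tfa_get_event_constraints()), trading one general-purpose counter
 * for TSX transactions that are not force-aborted.
 */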
5312 static struct attribute *intel_pmu_attrs[] = {
5313 &dev_attr_freeze_on_smi.attr,
5314 &dev_attr_allow_tsx_force_abort.attr,
5319 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5321 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5325 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5327 return x86_pmu.pebs ? attr->mode : 0;
5331 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5333 return x86_pmu.lbr_nr ? attr->mode : 0;
5337 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5339 return x86_pmu.version >= 2 ? attr->mode : 0;
5343 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5345 if (attr == &dev_attr_allow_tsx_force_abort.attr)
5346 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5351 static struct attribute_group group_events_td = {
5355 static struct attribute_group group_events_mem = {
5357 .is_visible = pebs_is_visible,
5360 static struct attribute_group group_events_tsx = {
5362 .is_visible = tsx_is_visible,
5365 static struct attribute_group group_caps_gen = {
5367 .attrs = intel_pmu_caps_attrs,
5370 static struct attribute_group group_caps_lbr = {
5373 .is_visible = lbr_is_visible,
5376 static struct attribute_group group_format_extra = {
5378 .is_visible = exra_is_visible,
5381 static struct attribute_group group_format_extra_skl = {
5383 .is_visible = exra_is_visible,
5386 static struct attribute_group group_default = {
5387 .attrs = intel_pmu_attrs,
5388 .is_visible = default_is_visible,
5391 static const struct attribute_group *attr_update[] = {
5397 &group_format_extra,
5398 &group_format_extra_skl,
5403 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
5404 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
5405 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
5406 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
5407 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
5408 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
5409 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
5410 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
5411 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
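/*
 * Note: each hybrid event string above carries one encoding per PMU type,
 * separated by ';', so an alias like topdown-retiring is meant to resolve
 * to different raw encodings on the cpu_core and cpu_atom PMUs; the
 * encoding actually shown is picked per PMU when the sysfs "events"
 * entries are generated.
 */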
5413 static struct attribute *adl_hybrid_events_attrs[] = {
5414 EVENT_PTR(slots_adl),
5415 EVENT_PTR(td_retiring_adl),
5416 EVENT_PTR(td_bad_spec_adl),
5417 EVENT_PTR(td_fe_bound_adl),
5418 EVENT_PTR(td_be_bound_adl),
5419 EVENT_PTR(td_heavy_ops_adl),
5420 EVENT_PTR(td_br_mis_adl),
5421 EVENT_PTR(td_fetch_lat_adl),
5422 EVENT_PTR(td_mem_bound_adl),
5426 /* Must be in IDX order */
5427 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
5428 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
5429 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
5431 static struct attribute *adl_hybrid_mem_attrs[] = {
5432 EVENT_PTR(mem_ld_adl),
5433 EVENT_PTR(mem_st_adl),
5434 EVENT_PTR(mem_ld_aux_adl),
5438 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
5439 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
5440 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
5441 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
5442 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
5443 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
5444 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
5445 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
5447 static struct attribute *adl_hybrid_tsx_attrs[] = {
5448 EVENT_PTR(tx_start_adl),
5449 EVENT_PTR(tx_abort_adl),
5450 EVENT_PTR(tx_commit_adl),
5451 EVENT_PTR(tx_capacity_read_adl),
5452 EVENT_PTR(tx_capacity_write_adl),
5453 EVENT_PTR(tx_conflict_adl),
5454 EVENT_PTR(cycles_t_adl),
5455 EVENT_PTR(cycles_ct_adl),
5459 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
5460 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
5461 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
5462 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
5463 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
5465 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
5466 FORMAT_HYBRID_PTR(in_tx),
5467 FORMAT_HYBRID_PTR(in_tx_cp),
5468 FORMAT_HYBRID_PTR(offcore_rsp),
5469 FORMAT_HYBRID_PTR(ldlat),
5470 FORMAT_HYBRID_PTR(frontend),
5474 static struct attribute *adl_hybrid_extra_attr[] = {
5475 FORMAT_HYBRID_PTR(offcore_rsp),
5476 FORMAT_HYBRID_PTR(ldlat),
5477 FORMAT_HYBRID_PTR(frontend),
5481 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
5483 struct device *dev = kobj_to_dev(kobj);
5484 struct x86_hybrid_pmu *pmu =
5485 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5486 struct perf_pmu_events_hybrid_attr *pmu_attr =
5487 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
5489 return pmu->cpu_type & pmu_attr->pmu_type;
5492 static umode_t hybrid_events_is_visible(struct kobject *kobj,
5493 struct attribute *attr, int i)
5495 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
5498 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
5500 int cpu = cpumask_first(&pmu->supported_cpus);
5502 return (cpu >= nr_cpu_ids) ? -1 : cpu;
5505 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
5506 struct attribute *attr, int i)
5508 struct device *dev = kobj_to_dev(kobj);
5509 struct x86_hybrid_pmu *pmu =
5510 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5511 int cpu = hybrid_find_supported_cpu(pmu);
5513 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
5516 static umode_t hybrid_format_is_visible(struct kobject *kobj,
5517 struct attribute *attr, int i)
5519 struct device *dev = kobj_to_dev(kobj);
5520 struct x86_hybrid_pmu *pmu =
5521 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5522 struct perf_pmu_format_hybrid_attr *pmu_attr =
5523 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
5524 int cpu = hybrid_find_supported_cpu(pmu);
5526 return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
5529 static struct attribute_group hybrid_group_events_td = {
5531 .is_visible = hybrid_events_is_visible,
5534 static struct attribute_group hybrid_group_events_mem = {
5536 .is_visible = hybrid_events_is_visible,
5539 static struct attribute_group hybrid_group_events_tsx = {
5541 .is_visible = hybrid_tsx_is_visible,
5544 static struct attribute_group hybrid_group_format_extra = {
5546 .is_visible = hybrid_format_is_visible,
5549 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
5550 struct device_attribute *attr,
5553 struct x86_hybrid_pmu *pmu =
5554 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5556 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
5559 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
5560 static struct attribute *intel_hybrid_cpus_attrs[] = {
5561 &dev_attr_cpus.attr,
5565 static struct attribute_group hybrid_group_cpus = {
5566 .attrs = intel_hybrid_cpus_attrs,
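/*
 * Usage note: on a hybrid part such as Alder Lake this attribute exists
 * once per PMU, e.g. "cat /sys/devices/cpu_core/cpus" and
 * "cat /sys/devices/cpu_atom/cpus" list the logical CPUs each PMU covers,
 * which is what tools use to target "perf stat -e cpu_core/cycles/".
 */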
5569 static const struct attribute_group *hybrid_attr_update[] = {
5570 &hybrid_group_events_td,
5571 &hybrid_group_events_mem,
5572 &hybrid_group_events_tsx,
5575 &hybrid_group_format_extra,
5581 static struct attribute *empty_attrs;
5583 static void intel_pmu_check_num_counters(int *num_counters,
5584 int *num_counters_fixed,
5585 u64 *intel_ctrl, u64 fixed_mask)
5587 if (*num_counters > INTEL_PMC_MAX_GENERIC) {
5588 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5589 *num_counters, INTEL_PMC_MAX_GENERIC);
5590 *num_counters = INTEL_PMC_MAX_GENERIC;
5592 *intel_ctrl = (1ULL << *num_counters) - 1;
5594 if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5595 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5596 *num_counters_fixed, INTEL_PMC_MAX_FIXED);
5597 *num_counters_fixed = INTEL_PMC_MAX_FIXED;
5600 *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
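/*
 * Worked example: with 8 general-purpose counters and fixed_mask == 0xf,
 * intel_ctrl becomes ((1ULL << 8) - 1) | (0xfULL << 32) =
 * 0x0000000f000000ffULL, i.e. the MSR_CORE_PERF_GLOBAL_CTRL enable bits
 * for GP counters 0-7 and fixed counters 0-3.
 */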
5603 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5605 int num_counters_fixed,
5608 struct event_constraint *c;
5610 if (!event_constraints)
5614 * event on fixed counter2 (REF_CYCLES) only works on this
5615 * counter, so do not extend mask to generic counters
5617 for_each_event_constraint(c, event_constraints) {
5619 * Don't extend the topdown slots and metrics
5620 * events to the generic counters.
5622 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
5624 * Disable topdown slots and metrics events,
5625 * if slots event is not in CPUID.
5627 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
5629 c->weight = hweight64(c->idxmsk64);
5633 if (c->cmask == FIXED_EVENT_FLAGS) {
5634 /* Disable fixed counters which are not in CPUID */
5635 c->idxmsk64 &= intel_ctrl;
5638 * Don't extend the pseudo-encoding to the
5641 if (!use_fixed_pseudo_encoding(c->code))
5642 c->idxmsk64 |= (1ULL << num_counters) - 1;
5645 ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
5646 c->weight = hweight64(c->idxmsk64);
5650 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
5652 struct extra_reg *er;
5655 * Accessing an extra MSR may cause a #GP fault under certain circumstances,
5656 * e.g. KVM doesn't support the offcore event MSRs.
5657 * Check all extra_regs here.
5662 for (er = extra_regs; er->msr; er++) {
5663 er->extra_msr_access = check_msr(er->msr, 0x11UL);
5664 /* Disable LBR select mapping */
5665 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5666 x86_pmu.lbr_sel_map = NULL;
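/*
 * For illustration: under a hypervisor that does not emulate the
 * OFFCORE_RSP MSRs, check_msr() fails for those entries, extra_msr_access
 * stays false and offcore-response events (config1) cannot be scheduled
 * in that guest; if the LBR select MSR is the one affected, LBR filtering
 * is disabled by clearing lbr_sel_map as done above.
 */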
5670 static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
5672 struct x86_hybrid_pmu *pmu;
5675 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5676 pmu = &x86_pmu.hybrid_pmu[i];
5678 intel_pmu_check_num_counters(&pmu->num_counters,
5679 &pmu->num_counters_fixed,
5683 if (pmu->intel_cap.perf_metrics) {
5684 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5685 pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
5688 if (pmu->intel_cap.pebs_output_pt_available)
5689 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
5691 intel_pmu_check_event_constraints(pmu->event_constraints,
5693 pmu->num_counters_fixed,
5696 intel_pmu_check_extra_regs(pmu->extra_regs);
5700 __init int intel_pmu_init(void)
5702 struct attribute **extra_skl_attr = &empty_attrs;
5703 struct attribute **extra_attr = &empty_attrs;
5704 struct attribute **td_attr = &empty_attrs;
5705 struct attribute **mem_attr = &empty_attrs;
5706 struct attribute **tsx_attr = &empty_attrs;
5707 union cpuid10_edx edx;
5708 union cpuid10_eax eax;
5709 union cpuid10_ebx ebx;
5710 unsigned int fixed_mask;
5714 struct x86_hybrid_pmu *pmu;
5716 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
5717 switch (boot_cpu_data.x86) {
5719 return p6_pmu_init();
5721 return knc_pmu_init();
5723 return p4_pmu_init();
5729 * Check whether the Architectural PerfMon supports
5730 * Branch Misses Retired hw_event or not.
5732 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
5733 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
5736 version = eax.split.version_id;
5740 x86_pmu = intel_pmu;
5742 x86_pmu.version = version;
5743 x86_pmu.num_counters = eax.split.num_counters;
5744 x86_pmu.cntval_bits = eax.split.bit_width;
5745 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
5747 x86_pmu.events_maskl = ebx.full;
5748 x86_pmu.events_mask_len = eax.split.mask_length;
5750 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
5751 x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
5754 * Quirk: v2 perfmon does not report fixed-purpose events, so
5755 * assume at least 3 events, when not running in a hypervisor:
5757 if (version > 1 && version < 5) {
5758 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
5760 x86_pmu.num_counters_fixed =
5761 max((int)edx.split.num_counters_fixed, assume);
5763 fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
5764 } else if (version >= 5)
5765 x86_pmu.num_counters_fixed = fls(fixed_mask);
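/*
 * Example: a version 5 PMU reporting fixed_mask (CPUID.0AH:ECX) == 0x7
 * supports fixed counters 0-2, so num_counters_fixed = fls(0x7) = 3;
 * on version 2-4 PMUs the count comes from EDX instead, with at least 3
 * assumed when not running under a hypervisor.
 */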
5767 if (boot_cpu_has(X86_FEATURE_PDCM)) {
5770 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
5771 x86_pmu.intel_cap.capabilities = capabilities;
5774 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
5775 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
5776 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
5779 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
5780 intel_pmu_arch_lbr_init();
5784 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
5787 x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
5788 if (x86_pmu.intel_cap.anythread_deprecated)
5789 pr_cont(" AnyThread deprecated, ");
5793 * Install the hw-cache-events table:
5795 switch (boot_cpu_data.x86_model) {
5796 case INTEL_FAM6_CORE_YONAH:
5797 pr_cont("Core events, ");
5801 case INTEL_FAM6_CORE2_MEROM:
5802 x86_add_quirk(intel_clovertown_quirk);
5805 case INTEL_FAM6_CORE2_MEROM_L:
5806 case INTEL_FAM6_CORE2_PENRYN:
5807 case INTEL_FAM6_CORE2_DUNNINGTON:
5808 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
5809 sizeof(hw_cache_event_ids));
5811 intel_pmu_lbr_init_core();
5813 x86_pmu.event_constraints = intel_core2_event_constraints;
5814 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
5815 pr_cont("Core2 events, ");
5819 case INTEL_FAM6_NEHALEM:
5820 case INTEL_FAM6_NEHALEM_EP:
5821 case INTEL_FAM6_NEHALEM_EX:
5822 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
5823 sizeof(hw_cache_event_ids));
5824 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5825 sizeof(hw_cache_extra_regs));
5827 intel_pmu_lbr_init_nhm();
5829 x86_pmu.event_constraints = intel_nehalem_event_constraints;
5830 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
5831 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5832 x86_pmu.extra_regs = intel_nehalem_extra_regs;
5833 x86_pmu.limit_period = nhm_limit_period;
5835 mem_attr = nhm_mem_events_attrs;
5837 /* UOPS_ISSUED.STALLED_CYCLES */
5838 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5839 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5840 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5841 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5842 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
5844 intel_pmu_pebs_data_source_nhm();
5845 x86_add_quirk(intel_nehalem_quirk);
5846 x86_pmu.pebs_no_tlb = 1;
5847 extra_attr = nhm_format_attr;
5849 pr_cont("Nehalem events, ");
5853 case INTEL_FAM6_ATOM_BONNELL:
5854 case INTEL_FAM6_ATOM_BONNELL_MID:
5855 case INTEL_FAM6_ATOM_SALTWELL:
5856 case INTEL_FAM6_ATOM_SALTWELL_MID:
5857 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
5858 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
5859 sizeof(hw_cache_event_ids));
5861 intel_pmu_lbr_init_atom();
5863 x86_pmu.event_constraints = intel_gen_event_constraints;
5864 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
5865 x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
5866 pr_cont("Atom events, ");
5870 case INTEL_FAM6_ATOM_SILVERMONT:
5871 case INTEL_FAM6_ATOM_SILVERMONT_D:
5872 case INTEL_FAM6_ATOM_SILVERMONT_MID:
5873 case INTEL_FAM6_ATOM_AIRMONT:
5874 case INTEL_FAM6_ATOM_AIRMONT_MID:
5875 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
5876 sizeof(hw_cache_event_ids));
5877 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
5878 sizeof(hw_cache_extra_regs));
5880 intel_pmu_lbr_init_slm();
5882 x86_pmu.event_constraints = intel_slm_event_constraints;
5883 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5884 x86_pmu.extra_regs = intel_slm_extra_regs;
5885 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5886 td_attr = slm_events_attrs;
5887 extra_attr = slm_format_attr;
5888 pr_cont("Silvermont events, ");
5889 name = "silvermont";
5892 case INTEL_FAM6_ATOM_GOLDMONT:
5893 case INTEL_FAM6_ATOM_GOLDMONT_D:
5894 memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
5895 sizeof(hw_cache_event_ids));
5896 memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
5897 sizeof(hw_cache_extra_regs));
5899 intel_pmu_lbr_init_skl();
5901 x86_pmu.event_constraints = intel_slm_event_constraints;
5902 x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
5903 x86_pmu.extra_regs = intel_glm_extra_regs;
5905 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5906 * for precise cycles.
5907 * :pp is identical to :ppp
5909 x86_pmu.pebs_aliases = NULL;
5910 x86_pmu.pebs_prec_dist = true;
5911 x86_pmu.lbr_pt_coexist = true;
5912 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5913 td_attr = glm_events_attrs;
5914 extra_attr = slm_format_attr;
5915 pr_cont("Goldmont events, ");
5919 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
5920 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5921 sizeof(hw_cache_event_ids));
5922 memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
5923 sizeof(hw_cache_extra_regs));
5925 intel_pmu_lbr_init_skl();
5927 x86_pmu.event_constraints = intel_slm_event_constraints;
5928 x86_pmu.extra_regs = intel_glm_extra_regs;
5930 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5931 * for precise cycles.
5933 x86_pmu.pebs_aliases = NULL;
5934 x86_pmu.pebs_prec_dist = true;
5935 x86_pmu.lbr_pt_coexist = true;
5936 x86_pmu.pebs_capable = ~0ULL;
5937 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5938 x86_pmu.flags |= PMU_FL_PEBS_ALL;
5939 x86_pmu.get_event_constraints = glp_get_event_constraints;
5940 td_attr = glm_events_attrs;
5941 /* Goldmont Plus has 4-wide pipeline */
5942 event_attr_td_total_slots_scale_glm.event_str = "4";
5943 extra_attr = slm_format_attr;
5944 pr_cont("Goldmont plus events, ");
5945 name = "goldmont_plus";
5948 case INTEL_FAM6_ATOM_TREMONT_D:
5949 case INTEL_FAM6_ATOM_TREMONT:
5950 case INTEL_FAM6_ATOM_TREMONT_L:
5951 x86_pmu.late_ack = true;
5952 memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5953 sizeof(hw_cache_event_ids));
5954 memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
5955 sizeof(hw_cache_extra_regs));
5956 hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
5958 intel_pmu_lbr_init_skl();
5960 x86_pmu.event_constraints = intel_slm_event_constraints;
5961 x86_pmu.extra_regs = intel_tnt_extra_regs;
5963 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5964 * for precise cycles.
5966 x86_pmu.pebs_aliases = NULL;
5967 x86_pmu.pebs_prec_dist = true;
5968 x86_pmu.lbr_pt_coexist = true;
5969 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5970 x86_pmu.get_event_constraints = tnt_get_event_constraints;
5971 td_attr = tnt_events_attrs;
5972 extra_attr = slm_format_attr;
5973 pr_cont("Tremont events, ");
5977 case INTEL_FAM6_WESTMERE:
5978 case INTEL_FAM6_WESTMERE_EP:
5979 case INTEL_FAM6_WESTMERE_EX:
5980 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
5981 sizeof(hw_cache_event_ids));
5982 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5983 sizeof(hw_cache_extra_regs));
5985 intel_pmu_lbr_init_nhm();
5987 x86_pmu.event_constraints = intel_westmere_event_constraints;
5988 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5989 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
5990 x86_pmu.extra_regs = intel_westmere_extra_regs;
5991 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5993 mem_attr = nhm_mem_events_attrs;
5995 /* UOPS_ISSUED.STALLED_CYCLES */
5996 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5997 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5998 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5999 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6000 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
6002 intel_pmu_pebs_data_source_nhm();
6003 extra_attr = nhm_format_attr;
6004 pr_cont("Westmere events, ");
6008 case INTEL_FAM6_SANDYBRIDGE:
6009 case INTEL_FAM6_SANDYBRIDGE_X:
6010 x86_add_quirk(intel_sandybridge_quirk);
6011 x86_add_quirk(intel_ht_bug);
6012 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6013 sizeof(hw_cache_event_ids));
6014 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6015 sizeof(hw_cache_extra_regs));
6017 intel_pmu_lbr_init_snb();
6019 x86_pmu.event_constraints = intel_snb_event_constraints;
6020 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
6021 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
6022 if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
6023 x86_pmu.extra_regs = intel_snbep_extra_regs;
6025 x86_pmu.extra_regs = intel_snb_extra_regs;
6028 /* all extra regs are per-cpu when HT is on */
6029 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6030 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6032 td_attr = snb_events_attrs;
6033 mem_attr = snb_mem_events_attrs;
6035 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6036 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6037 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6038 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
6039 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
6040 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
6042 extra_attr = nhm_format_attr;
6044 pr_cont("SandyBridge events, ");
6045 name = "sandybridge";
6048 case INTEL_FAM6_IVYBRIDGE:
6049 case INTEL_FAM6_IVYBRIDGE_X:
6050 x86_add_quirk(intel_ht_bug);
6051 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
6052 sizeof(hw_cache_event_ids));
6053 /* dTLB-load-misses on IVB is different than SNB */
6054 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
6056 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
6057 sizeof(hw_cache_extra_regs));
6059 intel_pmu_lbr_init_snb();
6061 x86_pmu.event_constraints = intel_ivb_event_constraints;
6062 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
6063 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6064 x86_pmu.pebs_prec_dist = true;
6065 if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
6066 x86_pmu.extra_regs = intel_snbep_extra_regs;
6068 x86_pmu.extra_regs = intel_snb_extra_regs;
6069 /* all extra regs are per-cpu when HT is on */
6070 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6071 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6073 td_attr = snb_events_attrs;
6074 mem_attr = snb_mem_events_attrs;
6076 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
6077 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
6078 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
6080 extra_attr = nhm_format_attr;
6082 pr_cont("IvyBridge events, ");
6087 case INTEL_FAM6_HASWELL:
6088 case INTEL_FAM6_HASWELL_X:
6089 case INTEL_FAM6_HASWELL_L:
6090 case INTEL_FAM6_HASWELL_G:
6091 x86_add_quirk(intel_ht_bug);
6092 x86_add_quirk(intel_pebs_isolation_quirk);
6093 x86_pmu.late_ack = true;
6094 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6095 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6097 intel_pmu_lbr_init_hsw();
6099 x86_pmu.event_constraints = intel_hsw_event_constraints;
6100 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
6101 x86_pmu.extra_regs = intel_snbep_extra_regs;
6102 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6103 x86_pmu.pebs_prec_dist = true;
6104 /* all extra regs are per-cpu when HT is on */
6105 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6106 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6108 x86_pmu.hw_config = hsw_hw_config;
6109 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6110 x86_pmu.lbr_double_abort = true;
6111 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6112 hsw_format_attr : nhm_format_attr;
6113 td_attr = hsw_events_attrs;
6114 mem_attr = hsw_mem_events_attrs;
6115 tsx_attr = hsw_tsx_events_attrs;
6116 pr_cont("Haswell events, ");
6120 case INTEL_FAM6_BROADWELL:
6121 case INTEL_FAM6_BROADWELL_D:
6122 case INTEL_FAM6_BROADWELL_G:
6123 case INTEL_FAM6_BROADWELL_X:
6124 x86_add_quirk(intel_pebs_isolation_quirk);
6125 x86_pmu.late_ack = true;
6126 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6127 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6129 /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
6130 hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
6131 BDW_L3_MISS|HSW_SNOOP_DRAM;
6132 hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
6134 hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
6135 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6136 hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
6137 BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
6139 intel_pmu_lbr_init_hsw();
6141 x86_pmu.event_constraints = intel_bdw_event_constraints;
6142 x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
6143 x86_pmu.extra_regs = intel_snbep_extra_regs;
6144 x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
6145 x86_pmu.pebs_prec_dist = true;
6146 /* all extra regs are per-cpu when HT is on */
6147 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6148 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6150 x86_pmu.hw_config = hsw_hw_config;
6151 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6152 x86_pmu.limit_period = bdw_limit_period;
6153 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6154 hsw_format_attr : nhm_format_attr;
6155 td_attr = hsw_events_attrs;
6156 mem_attr = hsw_mem_events_attrs;
6157 tsx_attr = hsw_tsx_events_attrs;
6158 pr_cont("Broadwell events, ");
6162 case INTEL_FAM6_XEON_PHI_KNL:
6163 case INTEL_FAM6_XEON_PHI_KNM:
6164 memcpy(hw_cache_event_ids,
6165 slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6166 memcpy(hw_cache_extra_regs,
6167 knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6168 intel_pmu_lbr_init_knl();
6170 x86_pmu.event_constraints = intel_slm_event_constraints;
6171 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
6172 x86_pmu.extra_regs = intel_knl_extra_regs;
6174 /* all extra regs are per-cpu when HT is on */
6175 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6176 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6177 extra_attr = slm_format_attr;
6178 pr_cont("Knights Landing/Mill events, ");
6179 name = "knights-landing";
6182 case INTEL_FAM6_SKYLAKE_X:
6185 case INTEL_FAM6_SKYLAKE_L:
6186 case INTEL_FAM6_SKYLAKE:
6187 case INTEL_FAM6_KABYLAKE_L:
6188 case INTEL_FAM6_KABYLAKE:
6189 case INTEL_FAM6_COMETLAKE_L:
6190 case INTEL_FAM6_COMETLAKE:
6191 x86_add_quirk(intel_pebs_isolation_quirk);
6192 x86_pmu.late_ack = true;
6193 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6194 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6195 intel_pmu_lbr_init_skl();
6197 /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
6198 event_attr_td_recovery_bubbles.event_str_noht =
6199 "event=0xd,umask=0x1,cmask=1";
6200 event_attr_td_recovery_bubbles.event_str_ht =
6201 "event=0xd,umask=0x1,cmask=1,any=1";
6203 x86_pmu.event_constraints = intel_skl_event_constraints;
6204 x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
6205 x86_pmu.extra_regs = intel_skl_extra_regs;
6206 x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
6207 x86_pmu.pebs_prec_dist = true;
6208 /* all extra regs are per-cpu when HT is on */
6209 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6210 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6212 x86_pmu.hw_config = hsw_hw_config;
6213 x86_pmu.get_event_constraints = hsw_get_event_constraints;
6214 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6215 hsw_format_attr : nhm_format_attr;
6216 extra_skl_attr = skl_format_attr;
6217 td_attr = hsw_events_attrs;
6218 mem_attr = hsw_mem_events_attrs;
6219 tsx_attr = hsw_tsx_events_attrs;
6220 intel_pmu_pebs_data_source_skl(pmem);
		/*
		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
		 * TSX force abort hooks are not required on these systems. Only deploy
		 * the workaround when the microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
		 */
		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
		    !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
		}
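		/*
		 * With PMU_FL_TFA set, the constraint and scheduling hooks above
		 * keep PMC3 off-limits unless TSX transactions are forced to
		 * abort first, trading RTM transactions for the fourth
		 * general-purpose counter on the affected parts.
		 */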
6235 pr_cont("Skylake events, ");
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
		x86_pmu.pebs_ept = 1;
		pmem = true;
		fallthrough;
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_ROCKETLAKE:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
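		/*
		 * Writing -1 below marks the generic ITLB read-access cache
		 * event as unsupported on these parts.
		 */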
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		intel_pmu_lbr_init_skl();
		x86_pmu.event_constraints = intel_icl_event_constraints;
		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_icl_extra_regs;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = icl_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = icl_events_attrs;
		td_attr = icl_td_events_attrs;
		tsx_attr = icl_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
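		/*
		 * Ice Lake exposes four topdown metric events (retiring, bad
		 * speculation, frontend bound, backend bound) read from the
		 * PERF_METRICS MSR together with the fixed SLOTS counter; the
		 * callbacks set below maintain them.
		 */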
		x86_pmu.num_topdown_events = 4;
		x86_pmu.update_topdown_event = icl_update_topdown_event;
		x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
		pr_cont("Icelake events, ");
		name = "icelake";
		break;
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
		pmem = true;
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		x86_pmu.event_constraints = intel_spr_event_constraints;
		x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints;
		x86_pmu.extra_regs = intel_spr_extra_regs;
		x86_pmu.limit_period = spr_limit_period;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.pebs_block = true;
		x86_pmu.pebs_capable = ~0ULL;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
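		/*
		 * The flags above advertise that every general-purpose counter
		 * supports PEBS (PEBS_ALL), that PEBS records carry the
		 * instruction-latency field (INSTR_LATENCY), and that the
		 * memory-loads event needs an auxiliary event scheduled
		 * alongside it (MEM_LOADS_AUX).
		 */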
		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = spr_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = spr_events_attrs;
		td_attr = spr_td_events_attrs;
		tsx_attr = spr_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
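		/*
		 * Sapphire Rapids extends topdown to a second level of
		 * metrics, hence eight metric events instead of the four on
		 * Ice Lake.
		 */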
		x86_pmu.num_topdown_events = 8;
		x86_pmu.update_topdown_event = icl_update_topdown_event;
		x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
		pr_cont("Sapphire Rapids events, ");
		name = "sapphire_rapids";
		break;
	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_ALDERLAKE_N:
	case INTEL_FAM6_RAPTORLAKE:
	case INTEL_FAM6_RAPTORLAKE_P:
		/*
		 * Alder Lake has two CPU types: Core (P-core) and Atom (E-core).
		 *
		 * Initialize the common PerfMon capabilities here.
		 */
		x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
					     sizeof(struct x86_hybrid_pmu),
					     GFP_KERNEL);
		if (!x86_pmu.hybrid_pmu)
			return -ENOMEM;
		static_branch_enable(&perf_is_hybrid);
		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.pebs_block = true;
		x86_pmu.pebs_capable = ~0ULL;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(false);
		x86_pmu.num_topdown_events = 8;
		x86_pmu.update_topdown_event = adl_update_topdown_event;
		x86_pmu.set_topdown_event_period = adl_set_topdown_event_period;

		x86_pmu.filter_match = intel_pmu_filter_match;
		x86_pmu.get_event_constraints = adl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;
		x86_pmu.limit_period = spr_limit_period;
		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
		/*
		 * The rtm_abort_event is used to check whether to enable GPRs
		 * for the RTM abort event. Atom doesn't have the RTM abort
		 * event, but there is no harm in setting it in the common
		 * x86_pmu.rtm_abort_event.
		 */
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);

		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		pmu->name = "cpu_core";
		pmu->cpu_type = hybrid_big;
		pmu->late_ack = true;
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->num_counters = x86_pmu.num_counters + 2;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
		} else {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}
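		/*
		 * On hybrid parts the common CPUID enumeration reports the
		 * counter set shared by both core types, so the extra P-core
		 * counters (two general purpose, one fixed) are added back
		 * above when X86_FEATURE_HYBRID_CPU is set.
		 */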
		/*
		 * Quirk: On some Alder Lake machines, when all E-cores are
		 * disabled in the BIOS, CPUID leaf 0xA enumerates all counters
		 * of the P-cores even though X86_FEATURE_HYBRID_CPU is still
		 * set. The code above would then mistakenly add extra counters
		 * for the P-cores. Correct the number of counters here.
		 */
		if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}
		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);
		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
		pmu->intel_cap.perf_metrics = 1;
		pmu->intel_cap.pebs_output_pt_available = 0;
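		/*
		 * Topdown metrics (PERF_METRICS) are implemented only by the
		 * big cores, while PEBS output to Intel PT is only available
		 * on the Atom cores; it is therefore masked here and enabled
		 * in the cpu_atom PMU below.
		 */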
		memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
		memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
		pmu->event_constraints = intel_spr_event_constraints;
		pmu->pebs_constraints = intel_spr_pebs_event_constraints;
		pmu->extra_regs = intel_spr_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		pmu->name = "cpu_atom";
		pmu->cpu_type = hybrid_small;
		pmu->mid_ack = true;
		pmu->num_counters = x86_pmu.num_counters;
		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		pmu->max_pebs_events = x86_pmu.max_pebs_events;
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);
		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
		pmu->intel_cap.perf_metrics = 0;
		pmu->intel_cap.pebs_output_pt_available = 1;

		memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
		memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
		pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		pmu->event_constraints = intel_slm_event_constraints;
		pmu->pebs_constraints = intel_grt_pebs_event_constraints;
		pmu->extra_regs = intel_grt_extra_regs;
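		/*
		 * The Atom (Gracemont) side reuses the Goldmont Plus cache
		 * event table (glp_*) and Tremont cache extra regs (tnt_*),
		 * with Silvermont event constraints plus Gracemont-specific
		 * (grt_*) PEBS constraints and extra registers.
		 */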
6431 pr_cont("Alderlake Hybrid events, ");
6432 name = "alderlake_hybrid";

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		case 2:
		case 3:
		case 4:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		default:
			/*
			 * The default constraints for v5 and up can support up to
			 * 16 fixed counters. For fixed counters 4 and later,
			 * the pseudo-encoding is applied.
			 * The constraint list may be truncated to match the CPUID
			 * enumeration by inserting an EVENT_CONSTRAINT_END marker.
			 */
			if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
				x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
			intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
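			/*
			 * The assignment above turns that slot into an
			 * EVENT_CONSTRAINT_END marker, so the table stops at
			 * the number of fixed counters actually enumerated.
			 */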
			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v5+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs  = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs  = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}
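	/*
	 * The attribute pointers collected in the model switch above are
	 * plugged into the sysfs attribute groups here; hybrid systems use
	 * the separate hybrid_* groups so each hybrid PMU can expose its own
	 * event directories.
	 */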

	intel_pmu_check_num_counters(&x86_pmu.num_counters,
				     &x86_pmu.num_counters_fixed,
				     &x86_pmu.intel_ctrl,
				     (u64)fixed_mask);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
					  x86_pmu.num_counters,
					  x86_pmu.num_counters_fixed,
					  x86_pmu.intel_ctrl);
	/*
	 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs.
	 * Check all LBR MSRs here and disable LBR access if any of them
	 * cannot be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr) {
		intel_pmu_lbr_init();

		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}
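	/*
	 * The perf_snapshot_branch_stack static call installed above lets
	 * in-kernel users (e.g. BPF via bpf_get_branch_snapshot) capture the
	 * current LBR contents with minimal overhead; the arch-LBR variant is
	 * used when architectural LBRs are present.
	 */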

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

	if (is_hybrid())
		intel_pmu_check_hybrid_pmus((u64)fixed_mask);

	intel_aux_output_init();

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled.
 * If HT is off, then we disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();
	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug)