1 // SPDX-License-Identifier: GPL-2.0-only
5 * Used to coordinate shared registers between HT threads or
6 * among events on a single PMU.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/stddef.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/nmi.h>
17 #include <linux/kvm_host.h>
19 #include <asm/cpufeature.h>
20 #include <asm/debugreg.h>
21 #include <asm/hardirq.h>
22 #include <asm/intel-family.h>
23 #include <asm/intel_pt.h>
25 #include <asm/cpu_device_id.h>
27 #include "../perf_event.h"
30 * Intel PerfMon, used on Core and later.
32 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
34 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
35 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
36 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
37 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
38 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
39 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
40 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
41 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
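/*
 * Illustrative sketch (not used by the driver): the raw values above use
 * the architectural PERFEVTSEL packing, with the event code in bits 7:0
 * and the unit mask in bits 15:8. For example, 0x412e selects event 0x2e
 * with umask 0x41 (LLC misses). The hypothetical helpers below merely
 * decode that packing.
 */
static inline u8 example_evtsel_event(u64 config)
{
	return config & 0xff;
}

static inline u8 example_evtsel_umask(u64 config)
{
	return (config >> 8) & 0xff;
}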
44 static struct event_constraint intel_core_event_constraints[] __read_mostly =
46 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
47 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
48 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
49 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
50 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
51 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
55 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
57 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
58 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
59 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
60 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
61 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
62 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
63 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
64 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
65 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
66 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
67 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
68 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
69 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
73 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
75 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
76 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
77 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
78 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
79 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
80 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
81 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
82 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
83 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
84 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
85 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
89 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
91 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
92 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
93 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
97 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
99 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
100 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
101 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
102 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
103 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
104 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
105 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
109 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
111 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
112 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
113 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
114 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
115 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
116 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
117 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
118 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
119 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
120 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
125 * When HT is off, these events can only run on the bottom 4 counters.
126 * When HT is on, they are impacted by the HT bug and require EXCL access.
128 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
129 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
130 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
131 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
136 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
138 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
139 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
140 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
141 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
142 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
143 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
144 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
145 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
146 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
147 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
148 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
149 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
150 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
153 * When HT is off, these events can only run on the bottom 4 counters.
154 * When HT is on, they are impacted by the HT bug and require EXCL access.
156 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
157 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
158 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
159 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
164 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
166 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
167 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
168 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
169 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
173 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
178 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
180 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
181 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
182 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
186 static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
188 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
189 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
190 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
191 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
192 FIXED_EVENT_CONSTRAINT(0x0500, 4),
193 FIXED_EVENT_CONSTRAINT(0x0600, 5),
194 FIXED_EVENT_CONSTRAINT(0x0700, 6),
195 FIXED_EVENT_CONSTRAINT(0x0800, 7),
196 FIXED_EVENT_CONSTRAINT(0x0900, 8),
197 FIXED_EVENT_CONSTRAINT(0x0a00, 9),
198 FIXED_EVENT_CONSTRAINT(0x0b00, 10),
199 FIXED_EVENT_CONSTRAINT(0x0c00, 11),
200 FIXED_EVENT_CONSTRAINT(0x0d00, 12),
201 FIXED_EVENT_CONSTRAINT(0x0e00, 13),
202 FIXED_EVENT_CONSTRAINT(0x0f00, 14),
203 FIXED_EVENT_CONSTRAINT(0x1000, 15),
207 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
209 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
210 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
211 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
215 static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
216 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
217 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
218 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
219 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
223 static struct event_constraint intel_skl_event_constraints[] = {
224 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
225 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
226 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
227 INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
230 * when HT is off, these can only run on the bottom 4 counters
232 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
233 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
234 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
235 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
236 INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */
241 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
242 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
243 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
247 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
248 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
249 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
250 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
251 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
255 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
256 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
257 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
258 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
259 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
263 static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
264 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
265 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
266 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
268 * Note: the low 8 bits of the eventsel code are not a contiguous field;
269 * some of those bits #GP when written. They are masked out here.
271 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
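/*
 * Worked example (sketch): the 0x7fff17 valid mask above permits
 * MSR_PEBS_FRONTEND bits 0-2 and 4 (0x17) plus bits 8-22 (0x7fff00).
 * A config1 value with any other bit set is rejected by the generic
 * extra-reg valid_mask check rather than #GPing the MSR write.
 */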
275 static struct event_constraint intel_icl_event_constraints[] = {
276 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
277 FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
278 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
279 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
280 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
281 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
282 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
283 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
284 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
285 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
286 INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
287 INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
288 INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
289 INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
290 INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
291 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
292 INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
293 INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
294 INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
295 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
296 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
297 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
298 INTEL_EVENT_CONSTRAINT(0xef, 0xf),
299 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
303 static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
304 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
305 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
306 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
307 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
311 static struct extra_reg intel_glc_extra_regs[] __read_mostly = {
312 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
313 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
314 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
315 INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
316 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
317 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
321 static struct event_constraint intel_glc_event_constraints[] = {
322 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
323 FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
324 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
325 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
326 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
327 FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
328 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
329 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
330 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
331 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
332 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
333 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
334 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
335 METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
337 INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
338 INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
340 * Generally, event codes < 0x90 are restricted to counters 0-3.
341 * 0x2E and 0x3C are exceptions and have no such restriction.
343 INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
345 INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
346 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
347 INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
348 INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
349 INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
350 INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
351 INTEL_EVENT_CONSTRAINT(0xce, 0x1),
352 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
354 * Generally, event codes >= 0x90 are likely to have no restrictions.
355 * The exceptions are listed above.
357 INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
362 static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
363 INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
364 INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
365 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
366 INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
367 INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
368 INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
369 INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
373 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
374 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
375 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
377 static struct attribute *nhm_mem_events_attrs[] = {
378 EVENT_PTR(mem_ld_nhm),
383 * topdown events for Intel Core CPUs.
385 * The events are all measured in slots; a slot is one issue opportunity
386 * in a 4-wide pipeline. Some events already report in slots directly,
387 * while for cycle events we multiply by the pipeline width (4).
389 * With Hyper Threading on, topdown metrics are either summed or averaged
390 * between the threads of a core: (count_t0 + count_t1).
392 * For the average case the metric is always scaled to pipeline width,
393 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
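/*
 * Worked example (illustration only): if thread 0 counts 100 slots and
 * thread 1 counts 120, perf sums the SMT siblings to 220 and applies the
 * exported .scale of 2, giving 440 slots, the same as
 * ((100 + 120) / 2) * 4 from the formula above.
 */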
396 EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
397 "event=0x3c,umask=0x0", /* cpu_clk_unhalted.thread */
398 "event=0x3c,umask=0x0,any=1"); /* cpu_clk_unhalted.thread_any */
399 EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
400 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
401 "event=0xe,umask=0x1"); /* uops_issued.any */
402 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
403 "event=0xc2,umask=0x2"); /* uops_retired.retire_slots */
404 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
405 "event=0x9c,umask=0x1"); /* idq_uops_not_delivered_core */
406 EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
407 "event=0xd,umask=0x3,cmask=1", /* int_misc.recovery_cycles */
408 "event=0xd,umask=0x3,cmask=1,any=1"); /* int_misc.recovery_cycles_any */
409 EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
410 "4", "2");
412 EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
413 EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
414 EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
415 EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
416 EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
417 EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
418 EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
419 EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
420 EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");
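/*
 * Note (sketch): event=0x00 with umask 0x80-0x87 are pseudo-encodings;
 * they never program a general-purpose counter and are instead bound to
 * the PERF_METRICS MSR fields via the METRIC_EVENT_CONSTRAINT() entries
 * in the ICL/GLC constraint tables above.
 */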
422 static struct attribute *snb_events_attrs[] = {
423 EVENT_PTR(td_slots_issued),
424 EVENT_PTR(td_slots_retired),
425 EVENT_PTR(td_fetch_bubbles),
426 EVENT_PTR(td_total_slots),
427 EVENT_PTR(td_total_slots_scale),
428 EVENT_PTR(td_recovery_bubbles),
429 EVENT_PTR(td_recovery_bubbles_scale),
433 static struct attribute *snb_mem_events_attrs[] = {
434 EVENT_PTR(mem_ld_snb),
435 EVENT_PTR(mem_st_snb),
439 static struct event_constraint intel_hsw_event_constraints[] = {
440 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
441 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
442 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
443 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
444 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
445 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
446 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
447 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
448 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
449 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
450 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
451 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
454 * When HT is off, these events can only run on the bottom 4 counters.
455 * When HT is on, they are impacted by the HT bug and require EXCL access.
457 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
458 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
459 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
460 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
465 static struct event_constraint intel_bdw_event_constraints[] = {
466 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
467 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
468 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
469 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
470 INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
472 * when HT is off, these can only run on the bottom 4 counters
474 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
475 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
476 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
477 INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
481 static u64 intel_pmu_event_map(int hw_event)
483 return intel_perfmon_event_map[hw_event];
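/*
 * Example (illustrative): a generic PERF_COUNT_HW_CACHE_MISSES event is
 * resolved through this map to the raw encoding 0x412e; the common x86
 * code then ORs in the usual PERFEVTSEL control bits (USR/OS/INT/EN).
 */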
486 static __initconst const u64 glc_hw_cache_event_ids
487 [PERF_COUNT_HW_CACHE_MAX]
488 [PERF_COUNT_HW_CACHE_OP_MAX]
489 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
493 [ C(RESULT_ACCESS) ] = 0x81d0,
494 [ C(RESULT_MISS) ] = 0xe124,
497 [ C(RESULT_ACCESS) ] = 0x82d0,
502 [ C(RESULT_MISS) ] = 0xe424,
505 [ C(RESULT_ACCESS) ] = -1,
506 [ C(RESULT_MISS) ] = -1,
511 [ C(RESULT_ACCESS) ] = 0x12a,
512 [ C(RESULT_MISS) ] = 0x12a,
515 [ C(RESULT_ACCESS) ] = 0x12a,
516 [ C(RESULT_MISS) ] = 0x12a,
521 [ C(RESULT_ACCESS) ] = 0x81d0,
522 [ C(RESULT_MISS) ] = 0xe12,
525 [ C(RESULT_ACCESS) ] = 0x82d0,
526 [ C(RESULT_MISS) ] = 0xe13,
531 [ C(RESULT_ACCESS) ] = -1,
532 [ C(RESULT_MISS) ] = 0xe11,
535 [ C(RESULT_ACCESS) ] = -1,
536 [ C(RESULT_MISS) ] = -1,
538 [ C(OP_PREFETCH) ] = {
539 [ C(RESULT_ACCESS) ] = -1,
540 [ C(RESULT_MISS) ] = -1,
545 [ C(RESULT_ACCESS) ] = 0x4c4,
546 [ C(RESULT_MISS) ] = 0x4c5,
549 [ C(RESULT_ACCESS) ] = -1,
550 [ C(RESULT_MISS) ] = -1,
552 [ C(OP_PREFETCH) ] = {
553 [ C(RESULT_ACCESS) ] = -1,
554 [ C(RESULT_MISS) ] = -1,
559 [ C(RESULT_ACCESS) ] = 0x12a,
560 [ C(RESULT_MISS) ] = 0x12a,
565 static __initconst const u64 glc_hw_cache_extra_regs
566 [PERF_COUNT_HW_CACHE_MAX]
567 [PERF_COUNT_HW_CACHE_OP_MAX]
568 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
572 [ C(RESULT_ACCESS) ] = 0x10001,
573 [ C(RESULT_MISS) ] = 0x3fbfc00001,
576 [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
577 [ C(RESULT_MISS) ] = 0x3f3fc00002,
582 [ C(RESULT_ACCESS) ] = 0x10c000001,
583 [ C(RESULT_MISS) ] = 0x3fb3000001,
589 * Notes on the events:
590 * - data reads do not include code reads (comparable to earlier tables)
591 * - data counts include speculative execution (except L1 write, dtlb, bpu)
592 * - remote node access includes remote memory, remote cache, remote mmio.
593 * - prefetches are not included in the counts.
594 * - icache miss does not include decoded icache
597 #define SKL_DEMAND_DATA_RD BIT_ULL(0)
598 #define SKL_DEMAND_RFO BIT_ULL(1)
599 #define SKL_ANY_RESPONSE BIT_ULL(16)
600 #define SKL_SUPPLIER_NONE BIT_ULL(17)
601 #define SKL_L3_MISS_LOCAL_DRAM BIT_ULL(26)
602 #define SKL_L3_MISS_REMOTE_HOP0_DRAM BIT_ULL(27)
603 #define SKL_L3_MISS_REMOTE_HOP1_DRAM BIT_ULL(28)
604 #define SKL_L3_MISS_REMOTE_HOP2P_DRAM BIT_ULL(29)
605 #define SKL_L3_MISS (SKL_L3_MISS_LOCAL_DRAM| \
606 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
607 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
608 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
609 #define SKL_SPL_HIT BIT_ULL(30)
610 #define SKL_SNOOP_NONE BIT_ULL(31)
611 #define SKL_SNOOP_NOT_NEEDED BIT_ULL(32)
612 #define SKL_SNOOP_MISS BIT_ULL(33)
613 #define SKL_SNOOP_HIT_NO_FWD BIT_ULL(34)
614 #define SKL_SNOOP_HIT_WITH_FWD BIT_ULL(35)
615 #define SKL_SNOOP_HITM BIT_ULL(36)
616 #define SKL_SNOOP_NON_DRAM BIT_ULL(37)
617 #define SKL_ANY_SNOOP (SKL_SPL_HIT|SKL_SNOOP_NONE| \
618 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
619 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
620 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
621 #define SKL_DEMAND_READ SKL_DEMAND_DATA_RD
622 #define SKL_SNOOP_DRAM (SKL_SNOOP_NONE| \
623 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
624 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
625 SKL_SNOOP_HITM|SKL_SPL_HIT)
626 #define SKL_DEMAND_WRITE SKL_DEMAND_RFO
627 #define SKL_LLC_ACCESS SKL_ANY_RESPONSE
628 #define SKL_L3_MISS_REMOTE (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
629 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
630 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
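/*
 * Worked value (sketch): SKL_L3_MISS above ORs bits 26-29, i.e.
 * 0x04000000 | 0x08000000 | 0x10000000 | 0x20000000 == 0x3c000000,
 * covering local DRAM plus all three remote-hop DRAM sources.
 */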
632 static __initconst const u64 skl_hw_cache_event_ids
633 [PERF_COUNT_HW_CACHE_MAX]
634 [PERF_COUNT_HW_CACHE_OP_MAX]
635 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
639 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
640 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
643 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
644 [ C(RESULT_MISS) ] = 0x0,
646 [ C(OP_PREFETCH) ] = {
647 [ C(RESULT_ACCESS) ] = 0x0,
648 [ C(RESULT_MISS) ] = 0x0,
653 [ C(RESULT_ACCESS) ] = 0x0,
654 [ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
657 [ C(RESULT_ACCESS) ] = -1,
658 [ C(RESULT_MISS) ] = -1,
660 [ C(OP_PREFETCH) ] = {
661 [ C(RESULT_ACCESS) ] = 0x0,
662 [ C(RESULT_MISS) ] = 0x0,
667 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
668 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
671 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
672 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
674 [ C(OP_PREFETCH) ] = {
675 [ C(RESULT_ACCESS) ] = 0x0,
676 [ C(RESULT_MISS) ] = 0x0,
681 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
682 [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
685 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
686 [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
688 [ C(OP_PREFETCH) ] = {
689 [ C(RESULT_ACCESS) ] = 0x0,
690 [ C(RESULT_MISS) ] = 0x0,
695 [ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
696 [ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
699 [ C(RESULT_ACCESS) ] = -1,
700 [ C(RESULT_MISS) ] = -1,
702 [ C(OP_PREFETCH) ] = {
703 [ C(RESULT_ACCESS) ] = -1,
704 [ C(RESULT_MISS) ] = -1,
709 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
710 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
713 [ C(RESULT_ACCESS) ] = -1,
714 [ C(RESULT_MISS) ] = -1,
716 [ C(OP_PREFETCH) ] = {
717 [ C(RESULT_ACCESS) ] = -1,
718 [ C(RESULT_MISS) ] = -1,
723 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
724 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
727 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
728 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
730 [ C(OP_PREFETCH) ] = {
731 [ C(RESULT_ACCESS) ] = 0x0,
732 [ C(RESULT_MISS) ] = 0x0,
737 static __initconst const u64 skl_hw_cache_extra_regs
738 [PERF_COUNT_HW_CACHE_MAX]
739 [PERF_COUNT_HW_CACHE_OP_MAX]
740 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
744 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
745 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
746 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
747 SKL_L3_MISS|SKL_ANY_SNOOP|
748 SKL_SUPPLIER_NONE,
751 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
752 SKL_LLC_ACCESS|SKL_ANY_SNOOP,
753 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
754 SKL_L3_MISS|SKL_ANY_SNOOP|
755 SKL_SUPPLIER_NONE,
757 [ C(OP_PREFETCH) ] = {
758 [ C(RESULT_ACCESS) ] = 0x0,
759 [ C(RESULT_MISS) ] = 0x0,
764 [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
765 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
766 [ C(RESULT_MISS) ] = SKL_DEMAND_READ|
767 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
770 [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
771 SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
772 [ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
773 SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
775 [ C(OP_PREFETCH) ] = {
776 [ C(RESULT_ACCESS) ] = 0x0,
777 [ C(RESULT_MISS) ] = 0x0,
782 #define SNB_DMND_DATA_RD (1ULL << 0)
783 #define SNB_DMND_RFO (1ULL << 1)
784 #define SNB_DMND_IFETCH (1ULL << 2)
785 #define SNB_DMND_WB (1ULL << 3)
786 #define SNB_PF_DATA_RD (1ULL << 4)
787 #define SNB_PF_RFO (1ULL << 5)
788 #define SNB_PF_IFETCH (1ULL << 6)
789 #define SNB_LLC_DATA_RD (1ULL << 7)
790 #define SNB_LLC_RFO (1ULL << 8)
791 #define SNB_LLC_IFETCH (1ULL << 9)
792 #define SNB_BUS_LOCKS (1ULL << 10)
793 #define SNB_STRM_ST (1ULL << 11)
794 #define SNB_OTHER (1ULL << 15)
795 #define SNB_RESP_ANY (1ULL << 16)
796 #define SNB_NO_SUPP (1ULL << 17)
797 #define SNB_LLC_HITM (1ULL << 18)
798 #define SNB_LLC_HITE (1ULL << 19)
799 #define SNB_LLC_HITS (1ULL << 20)
800 #define SNB_LLC_HITF (1ULL << 21)
801 #define SNB_LOCAL (1ULL << 22)
802 #define SNB_REMOTE (0xffULL << 23)
803 #define SNB_SNP_NONE (1ULL << 31)
804 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
805 #define SNB_SNP_MISS (1ULL << 33)
806 #define SNB_NO_FWD (1ULL << 34)
807 #define SNB_SNP_FWD (1ULL << 35)
808 #define SNB_HITM (1ULL << 36)
809 #define SNB_NON_DRAM (1ULL << 37)
811 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
812 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
813 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
815 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
816 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
817 SNB_HITM)
819 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
820 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
822 #define SNB_L3_ACCESS SNB_RESP_ANY
823 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
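/*
 * Usage sketch (assumed tool syntax): these composites are what ends up
 * in MSR_OFFCORE_RSP_[01] via the extra-reg machinery, e.g.
 *
 *	perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x10001/
 *
 * where 0x10001 is SNB_DMND_DATA_RD|SNB_RESP_ANY and offcore_rsp is the
 * config1 format field exposed through sysfs.
 */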
825 static __initconst const u64 snb_hw_cache_extra_regs
826 [PERF_COUNT_HW_CACHE_MAX]
827 [PERF_COUNT_HW_CACHE_OP_MAX]
828 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
832 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
833 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
836 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
837 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
839 [ C(OP_PREFETCH) ] = {
840 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
841 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
846 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
847 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
850 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
851 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
853 [ C(OP_PREFETCH) ] = {
854 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
855 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
860 static __initconst const u64 snb_hw_cache_event_ids
861 [PERF_COUNT_HW_CACHE_MAX]
862 [PERF_COUNT_HW_CACHE_OP_MAX]
863 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
867 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
868 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
871 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
872 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
874 [ C(OP_PREFETCH) ] = {
875 [ C(RESULT_ACCESS) ] = 0x0,
876 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
881 [ C(RESULT_ACCESS) ] = 0x0,
882 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
885 [ C(RESULT_ACCESS) ] = -1,
886 [ C(RESULT_MISS) ] = -1,
888 [ C(OP_PREFETCH) ] = {
889 [ C(RESULT_ACCESS) ] = 0x0,
890 [ C(RESULT_MISS) ] = 0x0,
895 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
896 [ C(RESULT_ACCESS) ] = 0x01b7,
897 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
898 [ C(RESULT_MISS) ] = 0x01b7,
901 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
902 [ C(RESULT_ACCESS) ] = 0x01b7,
903 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
904 [ C(RESULT_MISS) ] = 0x01b7,
906 [ C(OP_PREFETCH) ] = {
907 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
908 [ C(RESULT_ACCESS) ] = 0x01b7,
909 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
910 [ C(RESULT_MISS) ] = 0x01b7,
915 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
916 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
919 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
920 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
922 [ C(OP_PREFETCH) ] = {
923 [ C(RESULT_ACCESS) ] = 0x0,
924 [ C(RESULT_MISS) ] = 0x0,
929 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
930 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
933 [ C(RESULT_ACCESS) ] = -1,
934 [ C(RESULT_MISS) ] = -1,
936 [ C(OP_PREFETCH) ] = {
937 [ C(RESULT_ACCESS) ] = -1,
938 [ C(RESULT_MISS) ] = -1,
943 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
944 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
947 [ C(RESULT_ACCESS) ] = -1,
948 [ C(RESULT_MISS) ] = -1,
950 [ C(OP_PREFETCH) ] = {
951 [ C(RESULT_ACCESS) ] = -1,
952 [ C(RESULT_MISS) ] = -1,
957 [ C(RESULT_ACCESS) ] = 0x01b7,
958 [ C(RESULT_MISS) ] = 0x01b7,
961 [ C(RESULT_ACCESS) ] = 0x01b7,
962 [ C(RESULT_MISS) ] = 0x01b7,
964 [ C(OP_PREFETCH) ] = {
965 [ C(RESULT_ACCESS) ] = 0x01b7,
966 [ C(RESULT_MISS) ] = 0x01b7,
973 * Notes on the events:
974 * - data reads do not include code reads (comparable to earlier tables)
975 * - data counts include speculative execution (except L1 write, dtlb, bpu)
976 * - remote node access includes remote memory, remote cache, remote mmio.
977 * - prefetches are not included in the counts because they are not
978 *   reliable.
981 #define HSW_DEMAND_DATA_RD BIT_ULL(0)
982 #define HSW_DEMAND_RFO BIT_ULL(1)
983 #define HSW_ANY_RESPONSE BIT_ULL(16)
984 #define HSW_SUPPLIER_NONE BIT_ULL(17)
985 #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
986 #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
987 #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
988 #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
989 #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
990 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
991 HSW_L3_MISS_REMOTE_HOP2P)
992 #define HSW_SNOOP_NONE BIT_ULL(31)
993 #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
994 #define HSW_SNOOP_MISS BIT_ULL(33)
995 #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
996 #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
997 #define HSW_SNOOP_HITM BIT_ULL(36)
998 #define HSW_SNOOP_NON_DRAM BIT_ULL(37)
999 #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
1000 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
1001 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
1002 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
1003 #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
1004 #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
1005 #define HSW_DEMAND_WRITE HSW_DEMAND_RFO
1006 #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
1007 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
1008 #define HSW_LLC_ACCESS HSW_ANY_RESPONSE
1010 #define BDW_L3_MISS_LOCAL BIT(26)
1011 #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
1012 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
1013 HSW_L3_MISS_REMOTE_HOP2P)
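/*
 * Sketch of the derivation: HSW_SNOOP_DRAM strips only the non-DRAM bit
 * from HSW_ANY_SNOOP, i.e. (bits 31-37) & ~BIT_ULL(37) leaves bits 31-36,
 * so a "DRAM" qualifier accepts every snoop response except the non-DRAM
 * one.
 */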
1016 static __initconst const u64 hsw_hw_cache_event_ids
1017 [PERF_COUNT_HW_CACHE_MAX]
1018 [PERF_COUNT_HW_CACHE_OP_MAX]
1019 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1023 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1024 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
1027 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1028 [ C(RESULT_MISS) ] = 0x0,
1030 [ C(OP_PREFETCH) ] = {
1031 [ C(RESULT_ACCESS) ] = 0x0,
1032 [ C(RESULT_MISS) ] = 0x0,
1037 [ C(RESULT_ACCESS) ] = 0x0,
1038 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
1041 [ C(RESULT_ACCESS) ] = -1,
1042 [ C(RESULT_MISS) ] = -1,
1044 [ C(OP_PREFETCH) ] = {
1045 [ C(RESULT_ACCESS) ] = 0x0,
1046 [ C(RESULT_MISS) ] = 0x0,
1051 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1052 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1055 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1056 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1058 [ C(OP_PREFETCH) ] = {
1059 [ C(RESULT_ACCESS) ] = 0x0,
1060 [ C(RESULT_MISS) ] = 0x0,
1065 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1066 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1069 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1070 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1072 [ C(OP_PREFETCH) ] = {
1073 [ C(RESULT_ACCESS) ] = 0x0,
1074 [ C(RESULT_MISS) ] = 0x0,
1079 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
1080 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1083 [ C(RESULT_ACCESS) ] = -1,
1084 [ C(RESULT_MISS) ] = -1,
1086 [ C(OP_PREFETCH) ] = {
1087 [ C(RESULT_ACCESS) ] = -1,
1088 [ C(RESULT_MISS) ] = -1,
1093 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
1094 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1097 [ C(RESULT_ACCESS) ] = -1,
1098 [ C(RESULT_MISS) ] = -1,
1100 [ C(OP_PREFETCH) ] = {
1101 [ C(RESULT_ACCESS) ] = -1,
1102 [ C(RESULT_MISS) ] = -1,
1107 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1108 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1111 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1112 [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
1114 [ C(OP_PREFETCH) ] = {
1115 [ C(RESULT_ACCESS) ] = 0x0,
1116 [ C(RESULT_MISS) ] = 0x0,
1121 static __initconst const u64 hsw_hw_cache_extra_regs
1122 [PERF_COUNT_HW_CACHE_MAX]
1123 [PERF_COUNT_HW_CACHE_OP_MAX]
1124 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1128 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1129 HSW_LLC_ACCESS,
1130 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1131 HSW_L3_MISS|HSW_ANY_SNOOP,
1134 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1135 HSW_LLC_ACCESS,
1136 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1137 HSW_L3_MISS|HSW_ANY_SNOOP,
1139 [ C(OP_PREFETCH) ] = {
1140 [ C(RESULT_ACCESS) ] = 0x0,
1141 [ C(RESULT_MISS) ] = 0x0,
1146 [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1147 HSW_L3_MISS_LOCAL_DRAM|
1148 HSW_SNOOP_DRAM,
1149 [ C(RESULT_MISS) ] = HSW_DEMAND_READ|
1150 HSW_L3_MISS_REMOTE|
1151 HSW_SNOOP_DRAM,
1154 [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1155 HSW_L3_MISS_LOCAL_DRAM|
1156 HSW_SNOOP_DRAM,
1157 [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
1158 HSW_L3_MISS_REMOTE|
1159 HSW_SNOOP_DRAM,
1161 [ C(OP_PREFETCH) ] = {
1162 [ C(RESULT_ACCESS) ] = 0x0,
1163 [ C(RESULT_MISS) ] = 0x0,
1168 static __initconst const u64 westmere_hw_cache_event_ids
1169 [PERF_COUNT_HW_CACHE_MAX]
1170 [PERF_COUNT_HW_CACHE_OP_MAX]
1171 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1175 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1176 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1179 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1180 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1182 [ C(OP_PREFETCH) ] = {
1183 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1184 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1189 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1190 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1193 [ C(RESULT_ACCESS) ] = -1,
1194 [ C(RESULT_MISS) ] = -1,
1196 [ C(OP_PREFETCH) ] = {
1197 [ C(RESULT_ACCESS) ] = 0x0,
1198 [ C(RESULT_MISS) ] = 0x0,
1203 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1204 [ C(RESULT_ACCESS) ] = 0x01b7,
1205 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1206 [ C(RESULT_MISS) ] = 0x01b7,
1209 * Use RFO, not WRITEBACK, because a write miss would typically occur
1210 * on RFO.
1213 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1214 [ C(RESULT_ACCESS) ] = 0x01b7,
1215 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1216 [ C(RESULT_MISS) ] = 0x01b7,
1218 [ C(OP_PREFETCH) ] = {
1219 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1220 [ C(RESULT_ACCESS) ] = 0x01b7,
1221 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1222 [ C(RESULT_MISS) ] = 0x01b7,
1227 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1228 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1231 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1232 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1234 [ C(OP_PREFETCH) ] = {
1235 [ C(RESULT_ACCESS) ] = 0x0,
1236 [ C(RESULT_MISS) ] = 0x0,
1241 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1242 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1245 [ C(RESULT_ACCESS) ] = -1,
1246 [ C(RESULT_MISS) ] = -1,
1248 [ C(OP_PREFETCH) ] = {
1249 [ C(RESULT_ACCESS) ] = -1,
1250 [ C(RESULT_MISS) ] = -1,
1255 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1256 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1259 [ C(RESULT_ACCESS) ] = -1,
1260 [ C(RESULT_MISS) ] = -1,
1262 [ C(OP_PREFETCH) ] = {
1263 [ C(RESULT_ACCESS) ] = -1,
1264 [ C(RESULT_MISS) ] = -1,
1269 [ C(RESULT_ACCESS) ] = 0x01b7,
1270 [ C(RESULT_MISS) ] = 0x01b7,
1273 [ C(RESULT_ACCESS) ] = 0x01b7,
1274 [ C(RESULT_MISS) ] = 0x01b7,
1276 [ C(OP_PREFETCH) ] = {
1277 [ C(RESULT_ACCESS) ] = 0x01b7,
1278 [ C(RESULT_MISS) ] = 0x01b7,
1284 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1285 * See IA32 SDM Vol 3B 30.6.1.3
1288 #define NHM_DMND_DATA_RD (1 << 0)
1289 #define NHM_DMND_RFO (1 << 1)
1290 #define NHM_DMND_IFETCH (1 << 2)
1291 #define NHM_DMND_WB (1 << 3)
1292 #define NHM_PF_DATA_RD (1 << 4)
1293 #define NHM_PF_DATA_RFO (1 << 5)
1294 #define NHM_PF_IFETCH (1 << 6)
1295 #define NHM_OFFCORE_OTHER (1 << 7)
1296 #define NHM_UNCORE_HIT (1 << 8)
1297 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
1298 #define NHM_OTHER_CORE_HITM (1 << 10)
1300 #define NHM_REMOTE_CACHE_FWD (1 << 12)
1301 #define NHM_REMOTE_DRAM (1 << 13)
1302 #define NHM_LOCAL_DRAM (1 << 14)
1303 #define NHM_NON_DRAM (1 << 15)
1305 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1306 #define NHM_REMOTE (NHM_REMOTE_DRAM)
1308 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
1309 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
1310 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1312 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1313 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1314 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
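/*
 * Note (sketch): unlike SNB and later parts, the NHM response MSR has no
 * single "any response" bit, so an L3 access is modeled as the union of
 * all hit sources (bits 8-10) and all miss sources (bits 12-15).
 */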
1316 static __initconst const u64 nehalem_hw_cache_extra_regs
1317 [PERF_COUNT_HW_CACHE_MAX]
1318 [PERF_COUNT_HW_CACHE_OP_MAX]
1319 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1323 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1324 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
1327 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1328 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
1330 [ C(OP_PREFETCH) ] = {
1331 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1332 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1337 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1338 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
1341 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1342 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
1344 [ C(OP_PREFETCH) ] = {
1345 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1346 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1351 static __initconst const u64 nehalem_hw_cache_event_ids
1352 [PERF_COUNT_HW_CACHE_MAX]
1353 [PERF_COUNT_HW_CACHE_OP_MAX]
1354 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1358 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1359 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1362 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
1363 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1365 [ C(OP_PREFETCH) ] = {
1366 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1367 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1372 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1373 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1376 [ C(RESULT_ACCESS) ] = -1,
1377 [ C(RESULT_MISS) ] = -1,
1379 [ C(OP_PREFETCH) ] = {
1380 [ C(RESULT_ACCESS) ] = 0x0,
1381 [ C(RESULT_MISS) ] = 0x0,
1386 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1387 [ C(RESULT_ACCESS) ] = 0x01b7,
1388 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1389 [ C(RESULT_MISS) ] = 0x01b7,
1392 * Use RFO, not WRITEBACK, because a write miss would typically occur
1393 * on RFO.
1396 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1397 [ C(RESULT_ACCESS) ] = 0x01b7,
1398 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1399 [ C(RESULT_MISS) ] = 0x01b7,
1401 [ C(OP_PREFETCH) ] = {
1402 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1403 [ C(RESULT_ACCESS) ] = 0x01b7,
1404 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1405 [ C(RESULT_MISS) ] = 0x01b7,
1410 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1411 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1414 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1415 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1417 [ C(OP_PREFETCH) ] = {
1418 [ C(RESULT_ACCESS) ] = 0x0,
1419 [ C(RESULT_MISS) ] = 0x0,
1424 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1425 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
1428 [ C(RESULT_ACCESS) ] = -1,
1429 [ C(RESULT_MISS) ] = -1,
1431 [ C(OP_PREFETCH) ] = {
1432 [ C(RESULT_ACCESS) ] = -1,
1433 [ C(RESULT_MISS) ] = -1,
1438 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1439 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1442 [ C(RESULT_ACCESS) ] = -1,
1443 [ C(RESULT_MISS) ] = -1,
1445 [ C(OP_PREFETCH) ] = {
1446 [ C(RESULT_ACCESS) ] = -1,
1447 [ C(RESULT_MISS) ] = -1,
1452 [ C(RESULT_ACCESS) ] = 0x01b7,
1453 [ C(RESULT_MISS) ] = 0x01b7,
1456 [ C(RESULT_ACCESS) ] = 0x01b7,
1457 [ C(RESULT_MISS) ] = 0x01b7,
1459 [ C(OP_PREFETCH) ] = {
1460 [ C(RESULT_ACCESS) ] = 0x01b7,
1461 [ C(RESULT_MISS) ] = 0x01b7,
1466 static __initconst const u64 core2_hw_cache_event_ids
1467 [PERF_COUNT_HW_CACHE_MAX]
1468 [PERF_COUNT_HW_CACHE_OP_MAX]
1469 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1473 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
1474 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
1477 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
1478 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
1480 [ C(OP_PREFETCH) ] = {
1481 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
1482 [ C(RESULT_MISS) ] = 0,
1487 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
1488 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
1491 [ C(RESULT_ACCESS) ] = -1,
1492 [ C(RESULT_MISS) ] = -1,
1494 [ C(OP_PREFETCH) ] = {
1495 [ C(RESULT_ACCESS) ] = 0,
1496 [ C(RESULT_MISS) ] = 0,
1501 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1502 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1505 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1506 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1508 [ C(OP_PREFETCH) ] = {
1509 [ C(RESULT_ACCESS) ] = 0,
1510 [ C(RESULT_MISS) ] = 0,
1515 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
1516 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
1519 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
1520 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
1522 [ C(OP_PREFETCH) ] = {
1523 [ C(RESULT_ACCESS) ] = 0,
1524 [ C(RESULT_MISS) ] = 0,
1529 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1530 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
1533 [ C(RESULT_ACCESS) ] = -1,
1534 [ C(RESULT_MISS) ] = -1,
1536 [ C(OP_PREFETCH) ] = {
1537 [ C(RESULT_ACCESS) ] = -1,
1538 [ C(RESULT_MISS) ] = -1,
1543 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1544 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1547 [ C(RESULT_ACCESS) ] = -1,
1548 [ C(RESULT_MISS) ] = -1,
1550 [ C(OP_PREFETCH) ] = {
1551 [ C(RESULT_ACCESS) ] = -1,
1552 [ C(RESULT_MISS) ] = -1,
1557 static __initconst const u64 atom_hw_cache_event_ids
1558 [PERF_COUNT_HW_CACHE_MAX]
1559 [PERF_COUNT_HW_CACHE_OP_MAX]
1560 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1564 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
1565 [ C(RESULT_MISS) ] = 0,
1568 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
1569 [ C(RESULT_MISS) ] = 0,
1571 [ C(OP_PREFETCH) ] = {
1572 [ C(RESULT_ACCESS) ] = 0x0,
1573 [ C(RESULT_MISS) ] = 0,
1578 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1579 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1582 [ C(RESULT_ACCESS) ] = -1,
1583 [ C(RESULT_MISS) ] = -1,
1585 [ C(OP_PREFETCH) ] = {
1586 [ C(RESULT_ACCESS) ] = 0,
1587 [ C(RESULT_MISS) ] = 0,
1592 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
1593 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
1596 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
1597 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
1599 [ C(OP_PREFETCH) ] = {
1600 [ C(RESULT_ACCESS) ] = 0,
1601 [ C(RESULT_MISS) ] = 0,
1606 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
1607 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
1610 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
1611 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
1613 [ C(OP_PREFETCH) ] = {
1614 [ C(RESULT_ACCESS) ] = 0,
1615 [ C(RESULT_MISS) ] = 0,
1620 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1621 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1624 [ C(RESULT_ACCESS) ] = -1,
1625 [ C(RESULT_MISS) ] = -1,
1627 [ C(OP_PREFETCH) ] = {
1628 [ C(RESULT_ACCESS) ] = -1,
1629 [ C(RESULT_MISS) ] = -1,
1634 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1635 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1638 [ C(RESULT_ACCESS) ] = -1,
1639 [ C(RESULT_MISS) ] = -1,
1641 [ C(OP_PREFETCH) ] = {
1642 [ C(RESULT_ACCESS) ] = -1,
1643 [ C(RESULT_MISS) ] = -1,
1648 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1649 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1650 /* no_alloc_cycles.not_delivered */
1651 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1652 "event=0xca,umask=0x50");
1653 EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1654 /* uops_retired.all */
1655 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1656 "event=0xc2,umask=0x10");
1657 /* uops_retired.all */
1658 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1659 "event=0xc2,umask=0x10");
1661 static struct attribute *slm_events_attrs[] = {
1662 EVENT_PTR(td_total_slots_slm),
1663 EVENT_PTR(td_total_slots_scale_slm),
1664 EVENT_PTR(td_fetch_bubbles_slm),
1665 EVENT_PTR(td_fetch_bubbles_scale_slm),
1666 EVENT_PTR(td_slots_issued_slm),
1667 EVENT_PTR(td_slots_retired_slm),
1671 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1673 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1674 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1675 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1679 #define SLM_DMND_READ SNB_DMND_DATA_RD
1680 #define SLM_DMND_WRITE SNB_DMND_RFO
1681 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1683 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1684 #define SLM_LLC_ACCESS SNB_RESP_ANY
1685 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
1687 static __initconst const u64 slm_hw_cache_extra_regs
1688 [PERF_COUNT_HW_CACHE_MAX]
1689 [PERF_COUNT_HW_CACHE_OP_MAX]
1690 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1694 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1695 [ C(RESULT_MISS) ] = 0,
1698 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1699 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1701 [ C(OP_PREFETCH) ] = {
1702 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1703 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1708 static __initconst const u64 slm_hw_cache_event_ids
1709 [PERF_COUNT_HW_CACHE_MAX]
1710 [PERF_COUNT_HW_CACHE_OP_MAX]
1711 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1715 [ C(RESULT_ACCESS) ] = 0,
1716 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
1719 [ C(RESULT_ACCESS) ] = 0,
1720 [ C(RESULT_MISS) ] = 0,
1722 [ C(OP_PREFETCH) ] = {
1723 [ C(RESULT_ACCESS) ] = 0,
1724 [ C(RESULT_MISS) ] = 0,
1729 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1730 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1733 [ C(RESULT_ACCESS) ] = -1,
1734 [ C(RESULT_MISS) ] = -1,
1736 [ C(OP_PREFETCH) ] = {
1737 [ C(RESULT_ACCESS) ] = 0,
1738 [ C(RESULT_MISS) ] = 0,
1743 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1744 [ C(RESULT_ACCESS) ] = 0x01b7,
1745 [ C(RESULT_MISS) ] = 0,
1748 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1749 [ C(RESULT_ACCESS) ] = 0x01b7,
1750 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1751 [ C(RESULT_MISS) ] = 0x01b7,
1753 [ C(OP_PREFETCH) ] = {
1754 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1755 [ C(RESULT_ACCESS) ] = 0x01b7,
1756 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1757 [ C(RESULT_MISS) ] = 0x01b7,
1762 [ C(RESULT_ACCESS) ] = 0,
1763 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1766 [ C(RESULT_ACCESS) ] = 0,
1767 [ C(RESULT_MISS) ] = 0,
1769 [ C(OP_PREFETCH) ] = {
1770 [ C(RESULT_ACCESS) ] = 0,
1771 [ C(RESULT_MISS) ] = 0,
1776 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1777 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1780 [ C(RESULT_ACCESS) ] = -1,
1781 [ C(RESULT_MISS) ] = -1,
1783 [ C(OP_PREFETCH) ] = {
1784 [ C(RESULT_ACCESS) ] = -1,
1785 [ C(RESULT_MISS) ] = -1,
1790 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1791 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1794 [ C(RESULT_ACCESS) ] = -1,
1795 [ C(RESULT_MISS) ] = -1,
1797 [ C(OP_PREFETCH) ] = {
1798 [ C(RESULT_ACCESS) ] = -1,
1799 [ C(RESULT_MISS) ] = -1,
1804 EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1805 EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1806 /* UOPS_NOT_DELIVERED.ANY */
1807 EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1808 /* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1809 EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1810 /* UOPS_RETIRED.ANY */
1811 EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1812 /* UOPS_ISSUED.ANY */
1813 EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1815 static struct attribute *glm_events_attrs[] = {
1816 EVENT_PTR(td_total_slots_glm),
1817 EVENT_PTR(td_total_slots_scale_glm),
1818 EVENT_PTR(td_fetch_bubbles_glm),
1819 EVENT_PTR(td_recovery_bubbles_glm),
1820 EVENT_PTR(td_slots_issued_glm),
1821 EVENT_PTR(td_slots_retired_glm),
1825 static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1826 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1827 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1828 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1832 #define GLM_DEMAND_DATA_RD BIT_ULL(0)
1833 #define GLM_DEMAND_RFO BIT_ULL(1)
1834 #define GLM_ANY_RESPONSE BIT_ULL(16)
1835 #define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1836 #define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1837 #define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1838 #define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1839 #define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1840 #define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1841 #define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1843 static __initconst const u64 glm_hw_cache_event_ids
1844 [PERF_COUNT_HW_CACHE_MAX]
1845 [PERF_COUNT_HW_CACHE_OP_MAX]
1846 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1849 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1850 [C(RESULT_MISS)] = 0x0,
1853 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1854 [C(RESULT_MISS)] = 0x0,
1856 [C(OP_PREFETCH)] = {
1857 [C(RESULT_ACCESS)] = 0x0,
1858 [C(RESULT_MISS)] = 0x0,
1863 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1864 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1867 [C(RESULT_ACCESS)] = -1,
1868 [C(RESULT_MISS)] = -1,
1870 [C(OP_PREFETCH)] = {
1871 [C(RESULT_ACCESS)] = 0x0,
1872 [C(RESULT_MISS)] = 0x0,
1877 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1878 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1881 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1882 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1884 [C(OP_PREFETCH)] = {
1885 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1886 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1891 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1892 [C(RESULT_MISS)] = 0x0,
1895 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1896 [C(RESULT_MISS)] = 0x0,
1898 [C(OP_PREFETCH)] = {
1899 [C(RESULT_ACCESS)] = 0x0,
1900 [C(RESULT_MISS)] = 0x0,
1905 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1906 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1909 [C(RESULT_ACCESS)] = -1,
1910 [C(RESULT_MISS)] = -1,
1912 [C(OP_PREFETCH)] = {
1913 [C(RESULT_ACCESS)] = -1,
1914 [C(RESULT_MISS)] = -1,
1919 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1920 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1923 [C(RESULT_ACCESS)] = -1,
1924 [C(RESULT_MISS)] = -1,
1926 [C(OP_PREFETCH)] = {
1927 [C(RESULT_ACCESS)] = -1,
1928 [C(RESULT_MISS)] = -1,
1933 static __initconst const u64 glm_hw_cache_extra_regs
1934 [PERF_COUNT_HW_CACHE_MAX]
1935 [PERF_COUNT_HW_CACHE_OP_MAX]
1936 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1939 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1940 GLM_LLC_ACCESS,
1941 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1942 GLM_LLC_MISS,
1945 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1946 GLM_LLC_ACCESS,
1947 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1948 GLM_LLC_MISS,
1950 [C(OP_PREFETCH)] = {
1951 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1952 GLM_LLC_ACCESS,
1953 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1954 GLM_LLC_MISS,
static __initconst const u64 glp_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
			[C(RESULT_MISS)]	= 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
			[C(RESULT_MISS)]	= 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
};
static __initconst const u64 glp_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
						  GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
};
#define TNT_LOCAL_DRAM			BIT_ULL(26)
#define TNT_DEMAND_READ			GLM_DEMAND_DATA_RD
#define TNT_DEMAND_WRITE		GLM_DEMAND_RFO
#define TNT_LLC_ACCESS			GLM_ANY_RESPONSE
#define TNT_SNP_ANY			(SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
					 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
#define TNT_LLC_MISS			(TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
static __initconst const u64 tnt_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= TNT_DEMAND_READ|
						  TNT_LLC_ACCESS,
			[C(RESULT_MISS)]	= TNT_DEMAND_READ|
						  TNT_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= TNT_DEMAND_WRITE|
						  TNT_LLC_ACCESS,
			[C(RESULT_MISS)]	= TNT_DEMAND_WRITE|
						  TNT_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
};
EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt,        "event=0x71,umask=0x0");
EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt,        "event=0xc2,umask=0x0");
EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt,        "event=0x73,umask=0x6");
EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt,        "event=0x74,umask=0x0");

static struct attribute *tnt_events_attrs[] = {
	EVENT_PTR(td_fe_bound_tnt),
	EVENT_PTR(td_retiring_tnt),
	EVENT_PTR(td_bad_spec_tnt),
	EVENT_PTR(td_be_bound_tnt),
	NULL,
};
static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_grt,	"event=0xd0,umask=0x5,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_grt,	"event=0xd0,umask=0x6");

static struct attribute *grt_mem_attrs[] = {
	EVENT_PTR(mem_ld_grt),
	EVENT_PTR(mem_st_grt),
	NULL
};
static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(topdown-retiring,       td_retiring_cmt,        "event=0x72,umask=0x0");
EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_cmt,        "event=0x73,umask=0x0");

static struct attribute *cmt_events_attrs[] = {
	EVENT_PTR(td_fe_bound_tnt),
	EVENT_PTR(td_retiring_cmt),
	EVENT_PTR(td_bad_spec_cmt),
	EVENT_PTR(td_be_bound_tnt),
	NULL
};
static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
	INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
	INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
	EVENT_EXTRA_END
};
#define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit E-state */
#define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit F-state */
#define KNL_MCDRAM_LOCAL	BIT_ULL(21)
#define KNL_MCDRAM_FAR		BIT_ULL(22)
#define KNL_DDR_LOCAL		BIT_ULL(23)
#define KNL_DDR_FAR		BIT_ULL(24)
#define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
				 KNL_DDR_LOCAL | KNL_DDR_FAR)
#define KNL_L2_READ		SLM_DMND_READ
#define KNL_L2_WRITE		SLM_DMND_WRITE
#define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
#define KNL_L2_ACCESS		SLM_LLC_ACCESS
#define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
				 KNL_DRAM_ANY | SNB_SNP_ANY | \
				 SNB_NON_DRAM)

static __initconst const u64 knl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
		},
	},
};
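/*
 * On KNL the "LLC" slot describes the shared L2. A write miss, for example,
 * combines KNL_L2_WRITE with KNL_L2_MISS, i.e. roughly an RFO that was
 * satisfied by another tile's L2, by MCDRAM (local or far), by DDR (local
 * or far), or by a non-DRAM source.
 */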
/*
 * Used from PMIs where the LBRs are already disabled.
 *
 * This function can be called consecutively. It is required to remain in
 * the disabled state when called consecutively.
 *
 * During consecutive calls, the same disable value is written to the related
 * registers, so the PMU state remains unchanged.
 *
 * intel_bts events don't coexist with intel PMU's BTS events because of
 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
 * disabled around intel PMU's event batching etc, only inside the PMI handler.
 *
 * Avoid PEBS_ENABLE MSR access in PMIs.
 * GLOBAL_CTRL has already been disabled, so none of the counters count
 * anymore; it doesn't matter whether PEBS is enabled or not.
 * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
 * However, some cases may change the PEBS status, e.g. PMI throttling.
 * PEBS_ENABLE should be updated where the status changes.
 */
static __always_inline void __intel_pmu_disable_all(bool bts)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}
static __always_inline void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all(true);
	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void __intel_pmu_enable_all(int added, bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);

	intel_pmu_lbr_enable_all(pmi);

	if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
		wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
		cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
	       intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void intel_pmu_enable_all(int added)
{
	intel_pmu_pebs_enable_all();
	__intel_pmu_enable_all(added, false);
}
static int
__intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
				  unsigned int cnt, unsigned long flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_lbr_read();
	cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);

	memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);

	intel_pmu_enable_all(0);
	local_irq_restore(flags);

	return cnt;
}

static int
intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{
	unsigned long flags;

	/* must not have branches... */
	local_irq_save(flags);
	__intel_pmu_disable_all(false); /* we don't care about BTS */
	__intel_pmu_lbr_disable();
	/*            ... until here */
	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
}

static int
intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
{
	unsigned long flags;

	/* must not have branches... */
	local_irq_save(flags);
	__intel_pmu_disable_all(false); /* we don't care about BTS */
	__intel_pmu_arch_lbr_disable();
	/*            ... until here */
	return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
}
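/*
 * The two variants above differ only in which LBR flavor they freeze
 * (legacy model-specific vs. architectural LBRs). They are wired up as the
 * perf_snapshot_branch_stack static call, which e.g. the BPF
 * bpf_get_branch_snapshot() helper uses to capture LBR contents with as few
 * intervening branches as possible between the disable and the read.
 */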
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The erratum requires the steps below:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			static_call(x86_pmu_update)(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			static_call(x86_pmu_set_period)(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

	if (cpuc->tfa_shadow != val) {
		cpuc->tfa_shadow = val;
		wrmsrl(MSR_TSX_FORCE_ABORT, val);
	}
}

static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	/*
	 * We're going to use PMC3, make sure TFA is set before we touch it.
	 */
	if (cntr == 3)
		intel_set_tfa(cpuc, true);
}

static void intel_tfa_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * If we find PMC3 is no longer used when we enable the PMU, we can
	 * clear TFA.
	 */
	if (!test_bit(3, cpuc->active_mask))
		intel_set_tfa(cpuc, false);

	intel_pmu_enable_all(added);
}
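/*
 * MSR_TSX_FORCE_ABORT trades TSX for PMC3: while the force-abort bit is set,
 * RTM transactions abort immediately but counter 3 counts reliably again.
 * The tfa_shadow copy above exists so the (expensive) MSR write is skipped
 * on the hot enable path whenever the value is already correct.
 */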
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline bool event_is_checkpointed(struct perf_event *event)
{
	return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}

static inline void intel_set_masks(struct perf_event *event, int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (event->attr.exclude_host)
		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
	if (event->attr.exclude_guest)
		__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
	if (event_is_checkpointed(event))
		__set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
}

static inline void intel_clear_masks(struct perf_event *event, int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
	__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
	__clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
}
static void intel_pmu_disable_fixed(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 mask;

	if (is_topdown_idx(idx)) {
		/*
		 * When there are other active TopDown events,
		 * don't disable the fixed counter 3.
		 */
		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
			return;
		idx = INTEL_PMC_IDX_FIXED_SLOTS;
	}

	intel_clear_masks(event, idx);

	mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
	cpuc->fixed_ctrl_val &= ~mask;
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	switch (idx) {
	case 0 ... INTEL_PMC_IDX_FIXED - 1:
		intel_clear_masks(event, idx);
		x86_pmu_disable_event(event);
		break;
	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
		intel_pmu_disable_fixed(event);
		break;
	case INTEL_PMC_IDX_FIXED_BTS:
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	case INTEL_PMC_IDX_FIXED_VLBR:
		intel_clear_masks(event, idx);
		break;
	default:
		intel_clear_masks(event, idx);
		pr_warn("Failed to disable the event with invalid index %d\n",
			idx);
		return;
	}

	/*
	 * Needs to be called after x86_pmu_disable_event,
	 * so we don't trigger the event without PEBS bit set.
	 */
	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_assign_event(struct perf_event *event, int idx)
{
	if (is_pebs_pt(event))
		perf_report_aux_output_id(event, idx);
}

static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK;
}

static void intel_pmu_del_event(struct perf_event *event)
{
	if (intel_pmu_needs_branch_stack(event))
		intel_pmu_lbr_del(event);
	if (event->attr.precise_ip)
		intel_pmu_pebs_del(event);
}
static int icl_set_topdown_event_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);

	/*
	 * The values in PERF_METRICS MSR are derived from fixed counter 3.
	 * Software should start both registers, PERF_METRICS and fixed
	 * counter 3, from zero.
	 * Clear PERF_METRICS and Fixed counter 3 in initialization.
	 * After that, both MSRs will be cleared for each read.
	 * Don't need to clear them again.
	 */
	if (left == x86_pmu.max_period) {
		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
		wrmsrl(MSR_PERF_METRICS, 0);
		hwc->saved_slots = 0;
		hwc->saved_metric = 0;
	}

	if ((hwc->saved_slots) && is_slots_event(event)) {
		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
		wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
	}

	perf_event_update_userpage(event);

	return 0;
}

DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);

static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
{
	u32 val;

	/*
	 * The metric is reported as an 8bit integer fraction
	 * summing up to 0xff.
	 * slots-in-metric = (Metric / 0xff) * slots
	 */
	val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
	return mul_u64_u32_div(slots, val, 0xff);
}
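/*
 * E.g. with slots = 1000 and a metric byte of 0x80 (128), the event is
 * credited mul_u64_u32_div(1000, 128, 255) = 501 slots, i.e. roughly half
 * of all issue slots observed in the measurement interval.
 */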
static u64 icl_get_topdown_value(struct perf_event *event,
				 u64 slots, u64 metrics)
{
	int idx = event->hw.idx;
	u64 delta;

	if (is_metric_idx(idx))
		delta = icl_get_metrics_event_value(metrics, slots, idx);
	else
		delta = slots;

	return delta;
}

static void __icl_update_topdown_event(struct perf_event *event,
				       u64 slots, u64 metrics,
				       u64 last_slots, u64 last_metrics)
{
	u64 delta, last = 0;

	delta = icl_get_topdown_value(event, slots, metrics);
	if (last_slots)
		last = icl_get_topdown_value(event, last_slots, last_metrics);

	/*
	 * The 8bit integer fraction of metric may be not accurate,
	 * especially when the change is very small.
	 * For example, if only a few bad_spec happen, the fraction
	 * may be reduced from 1 to 0. If so, the bad_spec event value
	 * will be 0 which is definitely less than the last value.
	 * Avoid updating event->count for this case.
	 */
	if (delta > last) {
		delta -= last;
		local64_add(delta, &event->count);
	}
}

static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
				      u64 metrics, int metric_end)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *other;
	int idx;

	event->hw.saved_slots = slots;
	event->hw.saved_metric = metrics;

	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
		if (!is_topdown_idx(idx))
			continue;
		other = cpuc->events[idx];
		other->hw.saved_slots = slots;
		other->hw.saved_metric = metrics;
	}
}

/*
 * Update all active Topdown events.
 *
 * PERF_METRICS and Fixed counter 3 are read separately. The values may be
 * modified by an NMI. The PMU has to be disabled before calling this function.
 */

static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *other;
	u64 slots, metrics;
	bool reset = true;
	int idx;

	/* read Fixed counter 3 */
	rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
	if (!slots)
		return 0;

	/* read PERF_METRICS */
	rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);

	for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
		if (!is_topdown_idx(idx))
			continue;
		other = cpuc->events[idx];
		__icl_update_topdown_event(other, slots, metrics,
					   event ? event->hw.saved_slots : 0,
					   event ? event->hw.saved_metric : 0);
	}

	/*
	 * Check and update this event, which may have been cleared
	 * in active_mask e.g. x86_pmu_stop()
	 */
	if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
		__icl_update_topdown_event(event, slots, metrics,
					   event->hw.saved_slots,
					   event->hw.saved_metric);

		/*
		 * In x86_pmu_stop(), the event is cleared in active_mask first,
		 * then the delta is drained, which indicates a context switch
		 * for counting.
		 * Save metric and slots for the context switch.
		 * There is no need to reset PERF_METRICS and Fixed counter 3,
		 * because the values will be restored in the next schedule in.
		 */
		update_saved_topdown_regs(event, slots, metrics, metric_end);
		reset = false;
	}

	if (reset) {
		/* The fixed counter 3 has to be written before the PERF_METRICS. */
		wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
		wrmsrl(MSR_PERF_METRICS, 0);
		if (event)
			update_saved_topdown_regs(event, 0, 0, metric_end);
	}

	return slots;
}

static u64 icl_update_topdown_event(struct perf_event *event)
{
	return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
						 x86_pmu.num_topdown_events - 1);
}
DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
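/*
 * Both topdown static calls default to the generic x86 handlers; on parts
 * with PERF_METRICS support they are redirected at init time (e.g. to
 * icl_update_topdown_event() above), so the common paths never have to
 * branch on the CPU model.
 */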
static void intel_pmu_read_topdown_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/* Only need to call update_topdown_event() once for group read. */
	if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
	    !is_slots_event(event))
		return;

	perf_pmu_disable(event->pmu);
	static_call(intel_pmu_update_topdown_event)(event);
	perf_pmu_enable(event->pmu);
}

static void intel_pmu_read_event(struct perf_event *event)
{
	if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
		intel_pmu_auto_reload_read(event);
	else if (is_topdown_count(event))
		intel_pmu_read_topdown_event(event);
	else
		x86_perf_event_update(event);
}
static void intel_pmu_enable_fixed(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 mask, bits = 0;
	int idx = hwc->idx;

	if (is_topdown_idx(idx)) {
		/*
		 * When there are other active TopDown events,
		 * don't enable the fixed counter 3 again.
		 */
		if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
			return;

		idx = INTEL_PMC_IDX_FIXED_SLOTS;
	}

	intel_set_masks(event, idx);

	/*
	 * Enable IRQ generation (0x8), if not PEBS,
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	if (!event->attr.precise_ip)
		bits |= INTEL_FIXED_0_ENABLE_PMI;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= INTEL_FIXED_0_USER;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= INTEL_FIXED_0_KERNEL;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= INTEL_FIXED_0_ANYTHREAD;

	idx -= INTEL_PMC_IDX_FIXED;
	bits = intel_fixed_bits_by_idx(idx, bits);
	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);

	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
	}

	cpuc->fixed_ctrl_val &= ~mask;
	cpuc->fixed_ctrl_val |= bits;
}
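/*
 * Each fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; intel_fixed_bits_by_idx() shifts the
 * OS/USR/ANY/PMI bits into the right field, so e.g. enabling ring-0
 * counting on fixed counter 1 sets INTEL_FIXED_0_KERNEL << 4. Note that
 * only cpuc->fixed_ctrl_val is updated here; the actual MSR write is
 * deferred to __intel_pmu_enable_all().
 */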
static void intel_pmu_enable_event(struct perf_event *event)
{
	u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	switch (idx) {
	case 0 ... INTEL_PMC_IDX_FIXED - 1:
		if (branch_sample_counters(event))
			enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR;
		intel_set_masks(event, idx);
		__x86_pmu_enable_event(hwc, enable_mask);
		break;
	case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
	case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
		intel_pmu_enable_fixed(event);
		break;
	case INTEL_PMC_IDX_FIXED_BTS:
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;
		intel_pmu_enable_bts(hwc->config);
		break;
	case INTEL_PMC_IDX_FIXED_VLBR:
		intel_set_masks(event, idx);
		break;
	default:
		pr_warn("Failed to enable the event with invalid index %d\n",
			idx);
	}
}

static void intel_pmu_add_event(struct perf_event *event)
{
	if (event->attr.precise_ip)
		intel_pmu_pebs_add(event);
	if (intel_pmu_needs_branch_stack(event))
		intel_pmu_lbr_add(event);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	static_call(x86_pmu_update)(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return static_call(x86_pmu_set_period)(event);
}

static int intel_pmu_set_period(struct perf_event *event)
{
	if (unlikely(is_topdown_count(event)))
		return static_call(intel_pmu_set_topdown_event_period)(event);

	return x86_perf_event_set_period(event);
}

static u64 intel_pmu_update(struct perf_event *event)
{
	if (unlikely(is_topdown_count(event)))
		return static_call(intel_pmu_update_topdown_event)(event);

	return x86_perf_event_update(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
	int num_counters = hybrid(cpuc->pmu, num_counters);
	unsigned long flags;
	int idx;

	if (!num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < num_counters_fixed; idx++) {
		if (fixed_counter_disabled(idx, cpuc->pmu))
			continue;
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	/* Ack all overflows and disable fixed counters */
	if (x86_pmu.version >= 2) {
		intel_pmu_ack_status(intel_pmu_get_status());
		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	}

	/* Reset LBRs and LBR freezing */
	if (x86_pmu.lbr_nr) {
		update_debugctlmsr(get_debugctlmsr() &
			~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
	}

	local_irq_restore(flags);
}
/*
 * We may be running with guest PEBS events created by KVM, and the
 * PEBS records are logged into the guest's DS and invisible to the host.
 *
 * In the case of guest PEBS overflow, we only trigger a fake event
 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
 * The guest will then vm-entry and check the guest DS area to read
 * the guest PEBS records.
 *
 * The contents and other behavior of the guest event do not matter.
 */
static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
				      struct perf_sample_data *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
	struct perf_event *event = NULL;
	int bit;

	if (!unlikely(perf_guest_state()))
		return;

	if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
	    !guest_pebs_idxs)
		return;

	for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs,
			 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {
		event = cpuc->events[bit];
		if (!event->attr.precise_ip)
			continue;

		perf_sample_data_init(data, 0, event->hw.last_period);
		if (perf_event_overflow(event, data, regs))
			x86_pmu_stop(event, 0);

		/* Injecting one fake event is enough. */
		break;
	}
}
static int handle_pmi_common(struct pt_regs *regs, u64 status)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int bit;
	int handled = 0;
	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);

	inc_irq_stat(apic_perf_irqs);

	/*
	 * Ignore a range of extra bits in status that do not indicate
	 * overflow by themselves.
	 */
	status &= ~(GLOBAL_STATUS_COND_CHG |
		    GLOBAL_STATUS_ASIF |
		    GLOBAL_STATUS_LBRS_FROZEN);
	if (!status)
		return 0;
	/*
	 * In case multiple PEBS events are sampled at the same time,
	 * it is possible to have GLOBAL_STATUS bit 62 set indicating
	 * PEBS buffer overflow and also seeing at most 3 PEBS counters
	 * having their bits set in the status register. This is a sign
	 * that there was at least one PEBS record pending at the time
	 * of the PMU interrupt. PEBS counters must only be processed
	 * via the drain_pebs() calls and not via the regular sample
	 * processing loop that follows, otherwise phony regular samples
	 * may be generated in the sampling buffer not marked with the
	 * EXACT tag. Another possibility is to have one PEBS event and
	 * at least one non-PEBS event which overflows while PEBS is
	 * armed. In this case, bit 62 of GLOBAL_STATUS will not be set,
	 * yet the overflow status bit for the PEBS counter will be set
	 * on Skylake.
	 *
	 * To avoid this problem, we systematically ignore the PEBS-enabled
	 * counters from the GLOBAL_STATUS mask and we always process PEBS
	 * events via drain_pebs().
	 */
	status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
		u64 pebs_enabled = cpuc->pebs_enabled;

		handled++;
		x86_pmu_handle_guest_pebs(regs, &data);
		x86_pmu.drain_pebs(regs, &data);
		status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;

		/*
		 * A PMI throttle may be triggered, which stops the PEBS event.
		 * Although cpuc->pebs_enabled is updated accordingly, the
		 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
		 * forced to 0 in the PMI.
		 * Update the MSR if pebs_enabled changed.
		 */
		if (pebs_enabled != cpuc->pebs_enabled)
			wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
	}

	/*
	 * Intel PT
	 */
	if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
		handled++;
		if (!perf_guest_handle_intel_pt_intr())
			intel_pt_interrupt();
	}

	/*
	 * Intel Perf metrics
	 */
	if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
		handled++;
		static_call(intel_pmu_update_topdown_event)(NULL);
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			intel_pmu_lbr_save_brstack(&data, cpuc, event);

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	return handled;
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
	bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
	int loops;
	u64 status;
	int handled;
	int pmu_enabled;

	/*
	 * Save the PMU state.
	 * It needs to be restored when leaving the handler.
	 */
	pmu_enabled = cpuc->enabled;
	/*
	 * In general, the early ACK is only applied for old platforms.
	 * For big cores starting from Haswell, the late ACK should be
	 * applied.
	 * For small cores after Tremont, we have to do the ACK right
	 * before re-enabling counters, which is in the middle of the
	 * NMI handler.
	 */
	if (!late_ack && !mid_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	intel_bts_disable_local();
	cpuc->enabled = 0;
	__intel_pmu_disable_all(true);
	handled = intel_pmu_drain_bts_buffer();
	handled += intel_bts_interrupt();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_lbr_read();
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned;

		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	handled += handle_pmi_common(regs, status);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	if (mid_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
	cpuc->enabled = pmu_enabled;
	if (pmu_enabled)
		__intel_pmu_enable_all(0, true);
	intel_bts_enable_local();

	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	if (unlikely(intel_pmu_has_bts(event)))
		return &bts_constraint;

	return NULL;
}

/*
 * Note: matches a fake event, like Fixed2.
 */
static struct event_constraint *
intel_vlbr_constraints(struct perf_event *event)
{
	struct event_constraint *c = &vlbr_constraint;

	if (unlikely(constraint_match(c, event->hw.config))) {
		event->hw.flags |= c->flags;
		return c;
	}

	return NULL;
}

static int intel_alt_er(struct cpu_hw_events *cpuc,
			int idx, u64 config)
{
	struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
	int alt_idx = idx;

	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		alt_idx = EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		alt_idx = EXTRA_REG_RSP_0;

	if (config & ~extra_regs[alt_idx].valid_mask)
		return idx;

	return alt_idx;
}

static void intel_fixup_er(struct perf_event *event, int idx)
{
	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
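/*
 * E.g. an OFFCORE_RSP_0 event (0x01b7) that loses the race for
 * MSR_OFFCORE_RSP_0 can be moved by intel_alt_er() to the alternate
 * register; intel_fixup_er() then rewrites the event select to the 0x02b7
 * encoding from the extra-regs table so the hardware reads
 * MSR_OFFCORE_RSP_1 instead.
 */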
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraints() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(cpuc, idx, reg->config);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
	struct event_constraint *c;

	if (event_constraints) {
		for_each_event_constraint(c, event_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &hybrid_var(cpuc->pmu, unconstrained);
}

static struct event_constraint *
__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			      struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_vlbr_constraints(event);
	if (c)
		return c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, idx, event);
}
static void
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	xl = &excl_cntrs->states[tid];

	xl->sched_started = true;
	/*
	 * lock shared state until we are done scheduling
	 * in stop_event_scheduling()
	 * makes scheduling appear as a transaction
	 */
	raw_spin_lock(&excl_cntrs->lock);
}

static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct event_constraint *c = cpuc->event_constraint[idx];
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
		return;

	xl = &excl_cntrs->states[tid];

	lockdep_assert_held(&excl_cntrs->lock);

	if (c->flags & PERF_X86_EVENT_EXCL)
		xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
	else
		xl->state[cntr] = INTEL_EXCL_SHARED;
}

static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xl;
	int tid = cpuc->excl_thread_id;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return;
	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	xl = &excl_cntrs->states[tid];

	xl->sched_started = false;
	/*
	 * release shared state lock (acquired in intel_start_scheduling())
	 */
	raw_spin_unlock(&excl_cntrs->lock);
}
static struct event_constraint *
dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
{
	WARN_ON_ONCE(!cpuc->constraint_list);

	if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
		struct event_constraint *cx;

		/*
		 * grab pre-allocated constraint entry
		 */
		cx = &cpuc->constraint_list[idx];

		/*
		 * initialize dynamic constraint
		 * with static constraint
		 */
		*cx = *c;

		/*
		 * mark constraint as dynamic
		 */
		cx->flags |= PERF_X86_EVENT_DYNAMIC;
		c = cx;
	}

	return c;
}

static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			   int idx, struct event_constraint *c)
{
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	struct intel_excl_states *xlo;
	int tid = cpuc->excl_thread_id;
	int is_excl, i, w;

	/*
	 * validating a group does not require
	 * enforcing cross-thread exclusion
	 */
	if (cpuc->is_fake || !is_ht_workaround_enabled())
		return c;

	/*
	 * no exclusion needed
	 */
	if (WARN_ON_ONCE(!excl_cntrs))
		return c;

	/*
	 * because we modify the constraint, we need
	 * to make a copy. Static constraints come
	 * from static const tables.
	 *
	 * only needed when constraint has not yet
	 * been cloned (marked dynamic)
	 */
	c = dyn_constraint(cpuc, c, idx);

	/*
	 * From here on, the constraint is dynamic.
	 * Either it was just allocated above, or it
	 * was allocated during an earlier invocation
	 * of this function
	 */

	/*
	 * state of sibling HT
	 */
	xlo = &excl_cntrs->states[tid ^ 1];

	/*
	 * event requires exclusive counter access
	 * across HT threads
	 */
	is_excl = c->flags & PERF_X86_EVENT_EXCL;
	if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
		event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
		if (!cpuc->n_excl++)
			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
	}

	/*
	 * Modify static constraint with current dynamic
	 * state of sibling thread:
	 *
	 * EXCLUSIVE: sibling counter measuring exclusive event
	 * SHARED   : sibling counter measuring non-exclusive event
	 * UNUSED   : sibling counter unused
	 */
	w = c->weight;
	for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
		/*
		 * exclusive event in sibling counter
		 * our corresponding counter cannot be used
		 * regardless of our event
		 */
		if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
			__clear_bit(i, c->idxmsk);
			w--;
			continue;
		}
		/*
		 * if measuring an exclusive event, sibling
		 * measuring non-exclusive, then counter cannot
		 * be used
		 */
		if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
			__clear_bit(i, c->idxmsk);
			w--;
			continue;
		}
	}

	/*
	 * if we return an empty mask, then switch
	 * back to static empty constraint to avoid
	 * the cost of freeing later on
	 */
	if (!w)
		c = &emptyconstraint;

	c->weight = w;

	return c;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			    struct perf_event *event)
{
	struct event_constraint *c1, *c2;

	c1 = cpuc->event_constraint[idx];

	/*
	 * first time only
	 * - static constraint: no change across incremental scheduling calls
	 * - dynamic constraint: handled by intel_get_excl_constraints()
	 */
	c2 = __intel_get_event_constraints(cpuc, idx, event);
	if (c1) {
		WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
		bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
		c1->weight = c2->weight;
		c2 = c1;
	}

	if (cpuc->excl_cntrs)
		return intel_get_excl_constraints(cpuc, event, idx, c2);

	/* Not all counters support the branch counter feature. */
	if (branch_sample_counters(event)) {
		c2 = dyn_constraint(cpuc, c2, idx);
		c2->idxmsk64 &= x86_pmu.lbr_counters;
		c2->weight = hweight64(c2->idxmsk64);
	}

	return c2;
}
static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
		struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
	int tid = cpuc->excl_thread_id;
	struct intel_excl_states *xl;

	/*
	 * nothing needed if in group validation mode
	 */
	if (cpuc->is_fake)
		return;

	if (WARN_ON_ONCE(!excl_cntrs))
		return;

	if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
		hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
		if (!--cpuc->n_excl)
			WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
	}

	/*
	 * If event was actually assigned, then mark the counter state as
	 * unused now.
	 */
	if (hwc->idx >= 0) {
		xl = &excl_cntrs->states[tid];

		/*
		 * put_constraint may be called from x86_schedule_events()
		 * which already has the lock held so here make locking
		 * conditional.
		 */
		if (!xl->sched_started)
			raw_spin_lock(&excl_cntrs->lock);

		xl->state[hwc->idx] = INTEL_EXCL_UNUSED;

		if (!xl->sched_started)
			raw_spin_unlock(&excl_cntrs->lock);
	}
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);

	/*
	 * if the PMU has exclusive counter restrictions, then
	 * all events are subject to and must call the
	 * put_excl_constraints() routine
	 */
	if (cpuc->excl_cntrs)
		intel_put_excl_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_precdist(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
		 * (0x01c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * The PREC_DIST event has special support to minimize sample
		 * shadowing effects. One drawback is that it can be
		 * only programmed on counter 1, but that seems like an
		 * acceptable trade off.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_ivb(struct perf_event *event)
{
	if (event->attr.precise_ip < 3)
		return intel_pebs_aliases_snb(event);
	return intel_pebs_aliases_precdist(event);
}

static void intel_pebs_aliases_skl(struct perf_event *event)
{
	if (event->attr.precise_ip < 3)
		return intel_pebs_aliases_core2(event);
	return intel_pebs_aliases_precdist(event);
}
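/*
 * The aliases above differ only in which PEBS-capable substitute stands in
 * for CPU_CLK_UNHALTED.THREAD_P: plain cycles:p/:pp keeps the inv/cmask
 * trick on a retirement event, while cycles:ppp (precise_ip >= 3) is routed
 * to INST_RETIRED.PREC_DIST where the hardware supports it.
 */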
static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
{
	unsigned long flags = x86_pmu.large_pebs_flags;

	if (event->attr.use_clockid)
		flags &= ~PERF_SAMPLE_TIME;
	if (!event->attr.exclude_kernel)
		flags &= ~PERF_SAMPLE_REGS_USER;
	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
		flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
	return flags;
}

static int intel_pmu_bts_config(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	if (unlikely(intel_pmu_has_bts(event))) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;

		/* BTS is not allowed for precise events. */
		if (attr->precise_ip)
			return -EOPNOTSUPP;

		/* disallow bts if conflicting events are present */
		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
			return -EBUSY;

		event->destroy = hw_perf_lbr_event_destroy;
	}

	return 0;
}
static int core_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	return intel_pmu_bts_config(event);
}

#define INTEL_TD_METRIC_AVAILABLE_MAX	(INTEL_TD_METRIC_RETIRING + \
					 ((x86_pmu.num_topdown_events - 1) << 8))

static bool is_available_metric_event(struct perf_event *event)
{
	return is_metric_event(event) &&
		event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
}
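/*
 * The metric pseudo encodings are spaced 0x100 apart, so e.g. with 4
 * topdown metrics (Icelake) the valid configs span INTEL_TD_METRIC_RETIRING
 * through INTEL_TD_METRIC_RETIRING + 0x300, and with 8 (Sapphire Rapids)
 * the range extends to +0x700.
 */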
static inline bool is_mem_loads_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
}

static inline bool is_mem_loads_aux_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
}

static inline bool require_mem_loads_aux_event(struct perf_event *event)
{
	if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
		return false;

	if (is_hybrid())
		return hybrid_pmu(event->pmu)->pmu_type == hybrid_big;

	return true;
}

static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
{
	union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);

	return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	ret = intel_pmu_bts_config(event);
	if (ret)
		return ret;

	if (event->attr.precise_ip) {
		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
			return -EINVAL;

		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
			if (!(event->attr.sample_type &
			      ~intel_pmu_large_pebs_flags(event))) {
				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
				event->attach_state |= PERF_ATTACH_SCHED_CB;
			}
		}
		if (x86_pmu.pebs_aliases)
			x86_pmu.pebs_aliases(event);
	}

	if (needs_branch_stack(event) && is_sampling_event(event))
		event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;

	if (branch_sample_counters(event)) {
		struct perf_event *leader, *sibling;
		int num = 0;

		if (!(x86_pmu.flags & PMU_FL_BR_CNTR) ||
		    (event->attr.config & ~INTEL_ARCH_EVENT_MASK))
			return -EINVAL;

		/*
		 * Branch counter logging is not supported in the call stack
		 * mode yet, since we cannot simply flush the LBR during e.g.,
		 * multiplexing. Also, there is no obvious usage with the call
		 * stack mode. Simply forbid it for now.
		 *
		 * If any events in the group enable the branch counter logging
		 * feature, the group is treated as a branch counter logging
		 * group, which requires the extra space to store the counters.
		 */
		leader = event->group_leader;
		if (branch_sample_call_stack(leader))
			return -EINVAL;
		if (branch_sample_counters(leader))
			num++;
		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;

		for_each_sibling_event(sibling, leader) {
			if (branch_sample_call_stack(sibling))
				return -EINVAL;
			if (branch_sample_counters(sibling))
				num++;
		}

		if (num > fls(x86_pmu.lbr_counters))
			return -EINVAL;
		/*
		 * Only applying PERF_SAMPLE_BRANCH_COUNTERS doesn't
		 * require any branch stack setup.
		 * Clear the bit to avoid unnecessary branch stack setup.
		 */
		if (0 == (event->attr.branch_sample_type &
			  ~(PERF_SAMPLE_BRANCH_PLM_ALL |
			    PERF_SAMPLE_BRANCH_COUNTERS)))
			event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK;

		/*
		 * Force the leader to be a LBR event. So LBRs can be reset
		 * with the leader event. See intel_pmu_lbr_del() for details.
		 */
		if (!intel_pmu_needs_branch_stack(leader))
			return -EINVAL;
	}

	if (intel_pmu_needs_branch_stack(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
		event->attach_state |= PERF_ATTACH_SCHED_CB;

		/*
		 * BTS is set up earlier in this path, so don't account twice
		 */
		if (!unlikely(intel_pmu_has_bts(event))) {
			/* disallow lbr if conflicting events are present */
			if (x86_add_exclusive(x86_lbr_exclusive_lbr))
				return -EBUSY;

			event->destroy = hw_perf_lbr_event_destroy;
		}
	}

	if (event->attr.aux_output) {
		if (!event->attr.precise_ip)
			return -EINVAL;

		event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
	}

	if ((event->attr.type == PERF_TYPE_HARDWARE) ||
	    (event->attr.type == PERF_TYPE_HW_CACHE))
		return 0;

	/*
	 * Config Topdown slots and metric events
	 *
	 * The slots event on Fixed Counter 3 can support sampling,
	 * which will be handled normally in x86_perf_event_update().
	 *
	 * Metric events don't support sampling and require being paired
	 * with a slots event as group leader. When the slots event
	 * is used in a metrics group, it too cannot support sampling.
	 */
	if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
		if (event->attr.config1 || event->attr.config2)
			return -EINVAL;

		/*
		 * The TopDown metrics events and slots event don't
		 * support any filters.
		 */
		if (event->attr.config & X86_ALL_EVENT_FLAGS)
			return -EINVAL;

		if (is_available_metric_event(event)) {
			struct perf_event *leader = event->group_leader;

			/* The metric events don't support sampling. */
			if (is_sampling_event(event))
				return -EINVAL;

			/* The metric events require a slots group leader. */
			if (!is_slots_event(leader))
				return -EINVAL;

			/*
			 * The leader/SLOTS must not be a sampling event for
			 * metric use; hardware requires it starts at 0 when used
			 * in conjunction with MSR_PERF_METRICS.
			 */
			if (is_sampling_event(leader))
				return -EINVAL;

			event->event_caps |= PERF_EV_CAP_SIBLING;
			/*
			 * Only once we have a METRICs sibling do we
			 * need TopDown magic.
			 */
			leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
			event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
		}
	}

	/*
	 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
	 * doesn't function quite right. As a work-around it needs to always be
	 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
	 * The actual count of this second event is irrelevant; it just needs
	 * to be active to make the first event function correctly.
	 *
	 * In a group, the auxiliary event must be in front of the load latency
	 * event. The rule is to simplify the implementation of the check.
	 * That's because perf cannot have a complete group at the moment.
	 */
	if (require_mem_loads_aux_event(event) &&
	    (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
	    is_mem_loads_event(event)) {
		struct perf_event *leader = event->group_leader;
		struct perf_event *sibling = NULL;

		/*
		 * When this memload event is also the first event (no group
		 * exists yet), then there is no aux event before it.
		 */
		if (leader == event)
			return -ENODATA;

		if (!is_mem_loads_aux_event(leader)) {
			for_each_sibling_event(sibling, leader) {
				if (is_mem_loads_aux_event(sibling))
					break;
			}
			if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
				return -ENODATA;
		}
	}

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	ret = perf_allow_cpu(&event->attr);
	if (ret)
		return ret;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
/*
 * Currently, the only caller of this function is atomic_switch_perf_msrs().
 * The host perf context helps to prepare the values of the real hardware for
 * a set of msrs that need to be switched atomically in a vmx transaction.
 *
 * For example, the pseudocode needed to add a new msr should look like:
 *
 * arr[(*nr)++] = (struct perf_guest_switch_msr){
 *	.msr = the hardware msr address,
 *	.host = the value the hardware has when it doesn't run a guest,
 *	.guest = the value the hardware has when it runs a guest,
 * };
 *
 * These values have nothing to do with the emulated values the guest sees
 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
 * specifically in intel_pmu_{get,set}_msr().
 */
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
	u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
	int global_ctrl, pebs_enable;

	/*
	 * In addition to obeying exclude_guest/exclude_host, remove bits being
	 * used for PEBS when running a guest, because PEBS writes to virtual
	 * addresses (not physical addresses).
	 */
	*nr = 0;
	global_ctrl = (*nr)++;
	arr[global_ctrl] = (struct perf_guest_switch_msr){
		.msr = MSR_CORE_PERF_GLOBAL_CTRL,
		.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
		.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
	};

	if (!x86_pmu.pebs)
		return arr;

	/*
	 * If a PMU counter has PEBS enabled it is not enough to
	 * disable the counter on a guest entry since PEBS memory
	 * write can overshoot guest entry and corrupt guest
	 * memory. Disabling PEBS solves the problem.
	 *
	 * Don't do this if the CPU already enforces it.
	 */
	if (x86_pmu.pebs_no_isolation) {
		arr[(*nr)++] = (struct perf_guest_switch_msr){
			.msr = MSR_IA32_PEBS_ENABLE,
			.host = cpuc->pebs_enabled,
			.guest = 0,
		};
		return arr;
	}

	if (!kvm_pmu || !x86_pmu.pebs_ept)
		return arr;

	arr[(*nr)++] = (struct perf_guest_switch_msr){
		.msr = MSR_IA32_DS_AREA,
		.host = (unsigned long)cpuc->ds,
		.guest = kvm_pmu->ds_area,
	};

	if (x86_pmu.intel_cap.pebs_baseline) {
		arr[(*nr)++] = (struct perf_guest_switch_msr){
			.msr = MSR_PEBS_DATA_CFG,
			.host = cpuc->active_pebs_data_cfg,
			.guest = kvm_pmu->pebs_data_cfg,
		};
	}

	pebs_enable = (*nr)++;
	arr[pebs_enable] = (struct perf_guest_switch_msr){
		.msr = MSR_IA32_PEBS_ENABLE,
		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
	};

	if (arr[pebs_enable].host) {
		/* Disable guest PEBS if host PEBS is enabled. */
		arr[pebs_enable].guest = 0;
	} else {
		/* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
		arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
		arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
		/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
		arr[global_ctrl].guest |= arr[pebs_enable].guest;
	}

	return arr;
}
4196 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4198 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4199 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4202 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4203 struct perf_event *event = cpuc->events[idx];
4205 arr[idx].msr = x86_pmu_config_addr(idx);
4206 arr[idx].host = arr[idx].guest = 0;
4208 if (!test_bit(idx, cpuc->active_mask))
4211 arr[idx].host = arr[idx].guest =
4212 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4214 if (event->attr.exclude_host)
4215 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4216 else if (event->attr.exclude_guest)
4217 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4220 *nr = x86_pmu.num_counters;
4224 static void core_pmu_enable_event(struct perf_event *event)
4226 if (!event->attr.exclude_host)
4227 x86_pmu_enable_event(event);
4230 static void core_pmu_enable_all(int added)
4232 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4235 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4236 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4238 if (!test_bit(idx, cpuc->active_mask) ||
4239 cpuc->events[idx]->attr.exclude_host)
4242 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4246 static int hsw_hw_config(struct perf_event *event)
4248 int ret = intel_pmu_hw_config(event);
4252 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4254 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4257 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4258 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4259 * this combination.
4261 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4262 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4263 event->attr.precise_ip > 0))
4266 if (event_is_checkpointed(event)) {
4268 * Sampling of checkpointed events can cause situations where
4269 * the CPU constantly aborts because of an overflow, which is
4270 * then checkpointed back and ignored. Forbid checkpointing
4271 * for sampling.
4273 * But still allow a long sampling period, so that perf stat
4274 * still works.
4276 if (event->attr.sample_period > 0 &&
4277 event->attr.sample_period < 0x7fffffff)
4278 return -EOPNOTSUPP;
4283 static struct event_constraint counter0_constraint =
4284 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4286 static struct event_constraint counter1_constraint =
4287 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4289 static struct event_constraint counter0_1_constraint =
4290 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4292 static struct event_constraint counter2_constraint =
4293 EVENT_CONSTRAINT(0, 0x4, 0);
4295 static struct event_constraint fixed0_constraint =
4296 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4298 static struct event_constraint fixed0_counter0_constraint =
4299 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4301 static struct event_constraint fixed0_counter0_1_constraint =
4302 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4304 static struct event_constraint counters_1_7_constraint =
4305 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
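/*
 * Reading aid (not part of the original source): in these constraint
 * masks, bits 0-31 select generic counters and bits from
 * INTEL_PMC_IDX_FIXED (32) upwards select fixed counters. E.g.
 * 0x100000001ULL means "fixed counter 0 or GP counter 0", and 0xfeULL
 * means "GP counters 1-7".
 */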
4307 static struct event_constraint *
4308 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4309 struct perf_event *event)
4311 struct event_constraint *c;
4313 c = intel_get_event_constraints(cpuc, idx, event);
4315 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4316 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4317 if (c->idxmsk64 & (1U << 2))
4318 return &counter2_constraint;
4319 return &emptyconstraint;
4325 static struct event_constraint *
4326 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4327 struct perf_event *event)
4330 * Fixed counter 0 has less skid.
4331 * Force instruction:ppp onto Fixed counter 0.
4333 if ((event->attr.precise_ip == 3) &&
4334 constraint_match(&fixed0_constraint, event->hw.config))
4335 return &fixed0_constraint;
4337 return hsw_get_event_constraints(cpuc, idx, event);
4340 static struct event_constraint *
4341 glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4342 struct perf_event *event)
4344 struct event_constraint *c;
4346 c = icl_get_event_constraints(cpuc, idx, event);
4349 * The :ppp indicates the Precise Distribution (PDist) facility, which
4350 * is only supported on GP counter 0. If a :ppp event is not
4351 * available on GP counter 0, error out.
4352 * Exception: Instruction PDIR is only available on the fixed counter 0.
4354 if ((event->attr.precise_ip == 3) &&
4355 !constraint_match(&fixed0_constraint, event->hw.config)) {
4356 if (c->idxmsk64 & BIT_ULL(0))
4357 return &counter0_constraint;
4359 return &emptyconstraint;
4365 static struct event_constraint *
4366 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4367 struct perf_event *event)
4369 struct event_constraint *c;
4371 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4372 if (event->attr.precise_ip == 3)
4373 return &counter0_constraint;
4375 c = intel_get_event_constraints(cpuc, idx, event);
4380 static struct event_constraint *
4381 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4382 struct perf_event *event)
4384 struct event_constraint *c;
4386 c = intel_get_event_constraints(cpuc, idx, event);
4389 * :ppp means to do reduced skid PEBS,
4390 * which is available on PMC0 and fixed counter 0.
4392 if (event->attr.precise_ip == 3) {
4393 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4394 if (constraint_match(&fixed0_constraint, event->hw.config))
4395 return &fixed0_counter0_constraint;
4397 return &counter0_constraint;
4403 static bool allow_tsx_force_abort = true;
4405 static struct event_constraint *
4406 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4407 struct perf_event *event)
4409 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4412 * Without TFA we must not use PMC3.
4414 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4415 c = dyn_constraint(cpuc, c, idx);
4416 c->idxmsk64 &= ~(1ULL << 3);
4423 static struct event_constraint *
4424 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4425 struct perf_event *event)
4427 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4429 if (pmu->pmu_type == hybrid_big)
4430 return glc_get_event_constraints(cpuc, idx, event);
4431 else if (pmu->pmu_type == hybrid_small)
4432 return tnt_get_event_constraints(cpuc, idx, event);
4435 return &emptyconstraint;
4438 static struct event_constraint *
4439 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4440 struct perf_event *event)
4442 struct event_constraint *c;
4444 c = intel_get_event_constraints(cpuc, idx, event);
4447 * The :ppp indicates the Precise Distribution (PDist) facility, which
4448 * is only supported on GP counters 0 & 1 and Fixed counter 0.
4449 * If a :ppp event is not available on the above eligible counters,
4450 * error out.
4452 if (event->attr.precise_ip == 3) {
4453 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4454 if (constraint_match(&fixed0_constraint, event->hw.config)) {
4455 /* The fixed counter 0 doesn't support LBR event logging. */
4456 if (branch_sample_counters(event))
4457 return &counter0_1_constraint;
4459 return &fixed0_counter0_1_constraint;
4462 switch (c->idxmsk64 & 0x3ull) {
4463 case 0x1:
4464 return &counter0_constraint;
4465 case 0x2:
4466 return &counter1_constraint;
4467 case 0x3:
4468 return &counter0_1_constraint;
4470 return &emptyconstraint;
4476 static struct event_constraint *
4477 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4478 struct perf_event *event)
4480 struct event_constraint *c;
4482 c = glc_get_event_constraints(cpuc, idx, event);
4484 /* The Retire Latency is not supported by the fixed counter 0. */
4485 if (event->attr.precise_ip &&
4486 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4487 constraint_match(&fixed0_constraint, event->hw.config)) {
4489 * The Instruction PDIR is only available
4490 * on the fixed counter 0. Error out for this case.
4492 if (event->attr.precise_ip == 3)
4493 return &emptyconstraint;
4494 return &counters_1_7_constraint;
4500 static struct event_constraint *
4501 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4502 struct perf_event *event)
4504 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4506 if (pmu->pmu_type == hybrid_big)
4507 return rwc_get_event_constraints(cpuc, idx, event);
4508 if (pmu->pmu_type == hybrid_small)
4509 return cmt_get_event_constraints(cpuc, idx, event);
4512 return &emptyconstraint;
4515 static int adl_hw_config(struct perf_event *event)
4517 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4519 if (pmu->pmu_type == hybrid_big)
4520 return hsw_hw_config(event);
4521 else if (pmu->pmu_type == hybrid_small)
4522 return intel_pmu_hw_config(event);
4528 static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
4530 return HYBRID_INTEL_CORE;
4536 * The INST_RETIRED.ALL period always needs to have the lowest 6 bits cleared
4537 * (BDM55), and it must not use a period smaller than 100 (BDM11). We combine
4538 * the two to enforce a minimum period of 128 (the smallest value that has bits
4539 * 0-5 cleared and >= 100).
4541 * Because of how the code in x86_perf_event_set_period() works, the truncation
4542 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4543 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4545 * Therefore the effective (average) period matches the requested period,
4546 * despite coarser hardware granularity.
4548 static void bdw_limit_period(struct perf_event *event, s64 *left)
4550 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4551 X86_CONFIG(.event=0xc0, .umask=0x01)) {
4552 if (*left < 128)
4553 *left = 128;
4554 *left &= ~0x3fULL;
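/*
 * Worked example (illustrative, not part of the original source): a
 * requested period of 100 is raised to the minimum of 128 (bits 0-5
 * already clear); a requested period of 200 stays above the minimum but
 * is truncated to 200 & ~0x3f = 192. As the comment above explains, the
 * truncation error is carried in period_left and averages out over time.
 */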
4558 static void nhm_limit_period(struct perf_event *event, s64 *left)
4560 *left = max(*left, 32LL);
4563 static void glc_limit_period(struct perf_event *event, s64 *left)
4565 if (event->attr.precise_ip == 3)
4566 *left = max(*left, 128LL);
4569 PMU_FORMAT_ATTR(event, "config:0-7" );
4570 PMU_FORMAT_ATTR(umask, "config:8-15" );
4571 PMU_FORMAT_ATTR(edge, "config:18" );
4572 PMU_FORMAT_ATTR(pc, "config:19" );
4573 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4574 PMU_FORMAT_ATTR(inv, "config:23" );
4575 PMU_FORMAT_ATTR(cmask, "config:24-31" );
4576 PMU_FORMAT_ATTR(in_tx, "config:32");
4577 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
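/*
 * Illustrative example (not part of the original source): these format
 * attributes define how the perf tool assembles the raw config value
 * from a sysfs-style event string, e.g. cpu/event=0xc0,umask=0x01,cmask=1/
 * becomes:
 *
 *   config = 0xc0 | (0x01 << 8) | (1 << 24) = 0x10001c0
 */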
4579 static struct attribute *intel_arch_formats_attr[] = {
4580 &format_attr_event.attr,
4581 &format_attr_umask.attr,
4582 &format_attr_edge.attr,
4583 &format_attr_pc.attr,
4584 &format_attr_inv.attr,
4585 &format_attr_cmask.attr,
4589 ssize_t intel_event_sysfs_show(char *page, u64 config)
4591 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4593 return x86_event_sysfs_show(page, config, event);
4596 static struct intel_shared_regs *allocate_shared_regs(int cpu)
4598 struct intel_shared_regs *regs;
4601 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4602 GFP_KERNEL, cpu_to_node(cpu));
4605 * initialize the locks to keep lockdep happy
4607 for (i = 0; i < EXTRA_REG_MAX; i++)
4608 raw_spin_lock_init(&regs->regs[i].lock);
4615 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4617 struct intel_excl_cntrs *c;
4619 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4620 GFP_KERNEL, cpu_to_node(cpu));
4622 raw_spin_lock_init(&c->lock);
4629 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4631 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4633 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4634 cpuc->shared_regs = allocate_shared_regs(cpu);
4635 if (!cpuc->shared_regs)
4639 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
4640 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4642 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4643 if (!cpuc->constraint_list)
4644 goto err_shared_regs;
4647 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4648 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4649 if (!cpuc->excl_cntrs)
4650 goto err_constraint_list;
4652 cpuc->excl_thread_id = 0;
4657 err_constraint_list:
4658 kfree(cpuc->constraint_list);
4659 cpuc->constraint_list = NULL;
4661 err_shared_regs:
4662 kfree(cpuc->shared_regs);
4663 cpuc->shared_regs = NULL;
4669 static int intel_pmu_cpu_prepare(int cpu)
4671 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4674 static void flip_smm_bit(void *data)
4676 unsigned long set = *(unsigned long *)data;
4678 if (set > 0) {
4679 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4680 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4681 } else {
4682 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4683 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4687 static void intel_pmu_check_num_counters(int *num_counters,
4688 int *num_counters_fixed,
4689 u64 *intel_ctrl, u64 fixed_mask);
4691 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
4693 int num_counters_fixed,
4696 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
4698 static inline bool intel_pmu_broken_perf_cap(void)
4700 /* The Perf Metric (Bit 15) is always cleared */
4701 if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) ||
4702 (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L))
4708 static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
4710 unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
4711 unsigned int eax, ebx, ecx, edx;
4713 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
4714 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
4715 &eax, &ebx, &ecx, &edx);
4716 pmu->num_counters = fls(eax);
4717 pmu->num_counters_fixed = fls(ebx);
4721 if (!intel_pmu_broken_perf_cap()) {
4722 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
4723 rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
4727 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
4729 intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
4730 &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1);
4731 pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
4732 pmu->unconstrained = (struct event_constraint)
4733 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
4734 0, pmu->num_counters, 0, 0);
4736 if (pmu->intel_cap.perf_metrics)
4737 pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
4739 pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4741 if (pmu->intel_cap.pebs_output_pt_available)
4742 pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
4744 pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
4746 intel_pmu_check_event_constraints(pmu->event_constraints,
4748 pmu->num_counters_fixed,
4751 intel_pmu_check_extra_regs(pmu->extra_regs);
4754 static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
4756 u8 cpu_type = get_this_hybrid_cpu_type();
4760 * This is running on a CPU model that is known to have hybrid
4761 * configurations. But the CPU told us it is not hybrid, shame
4762 * on it. There should be a fixup function provided for these
4763 * troublesome CPUs (->get_hybrid_cpu_type).
4765 if (cpu_type == HYBRID_INTEL_NONE) {
4766 if (x86_pmu.get_hybrid_cpu_type)
4767 cpu_type = x86_pmu.get_hybrid_cpu_type();
4773 * This essentially just maps between the 'hybrid_cpu_type'
4774 * and 'hybrid_pmu_type' enums:
4776 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4777 enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
4779 if (cpu_type == HYBRID_INTEL_CORE &&
4780 pmu_type == hybrid_big)
4781 return &x86_pmu.hybrid_pmu[i];
4782 if (cpu_type == HYBRID_INTEL_ATOM &&
4783 pmu_type == hybrid_small)
4784 return &x86_pmu.hybrid_pmu[i];
4790 static bool init_hybrid_pmu(int cpu)
4792 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4793 struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu();
4795 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4800 /* Only check and dump the PMU information for the first CPU */
4801 if (!cpumask_empty(&pmu->supported_cpus))
4804 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
4805 update_pmu_cap(pmu);
4807 intel_pmu_check_hybrid_pmus(pmu);
4809 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4812 pr_info("%s PMU driver: ", pmu->name);
4814 if (pmu->intel_cap.pebs_output_pt_available)
4815 pr_cont("PEBS-via-PT ");
4819 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4823 cpumask_set_cpu(cpu, &pmu->supported_cpus);
4824 cpuc->pmu = &pmu->pmu;
4829 static void intel_pmu_cpu_starting(int cpu)
4831 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4832 int core_id = topology_core_id(cpu);
4835 if (is_hybrid() && !init_hybrid_pmu(cpu))
4838 init_debug_store_on_cpu(cpu);
4840 * Deal with CPUs that don't clear their LBRs on power-up.
4842 intel_pmu_lbr_reset();
4844 cpuc->lbr_sel = NULL;
4846 if (x86_pmu.flags & PMU_FL_TFA) {
4847 WARN_ON_ONCE(cpuc->tfa_shadow);
4848 cpuc->tfa_shadow = ~0ULL;
4849 intel_set_tfa(cpuc, false);
4852 if (x86_pmu.version > 1)
4853 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4856 * Disable perf metrics if any added CPU doesn't support it.
4858 * Turn off the check for a hybrid architecture, because the
4859 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
4860 * the architectural features. Perf metrics is a model-specific
4861 * feature for now. The corresponding bit should always be 0 on
4862 * a hybrid platform, e.g., Alder Lake.
4864 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4865 union perf_capabilities perf_cap;
4867 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4868 if (!perf_cap.perf_metrics) {
4869 x86_pmu.intel_cap.perf_metrics = 0;
4870 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4874 if (!cpuc->shared_regs)
4877 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4878 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4879 struct intel_shared_regs *pc;
4881 pc = per_cpu(cpu_hw_events, i).shared_regs;
4882 if (pc && pc->core_id == core_id) {
4883 cpuc->kfree_on_online[0] = cpuc->shared_regs;
4884 cpuc->shared_regs = pc;
4888 cpuc->shared_regs->core_id = core_id;
4889 cpuc->shared_regs->refcnt++;
4892 if (x86_pmu.lbr_sel_map)
4893 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4895 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4896 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4897 struct cpu_hw_events *sibling;
4898 struct intel_excl_cntrs *c;
4900 sibling = &per_cpu(cpu_hw_events, i);
4901 c = sibling->excl_cntrs;
4902 if (c && c->core_id == core_id) {
4903 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4904 cpuc->excl_cntrs = c;
4905 if (!sibling->excl_thread_id)
4906 cpuc->excl_thread_id = 1;
4910 cpuc->excl_cntrs->core_id = core_id;
4911 cpuc->excl_cntrs->refcnt++;
4915 static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4917 struct intel_excl_cntrs *c;
4919 c = cpuc->excl_cntrs;
4921 if (c->core_id == -1 || --c->refcnt == 0)
4922 kfree(c);
4923 cpuc->excl_cntrs = NULL;
4926 kfree(cpuc->constraint_list);
4927 cpuc->constraint_list = NULL;
4930 static void intel_pmu_cpu_dying(int cpu)
4932 fini_debug_store_on_cpu(cpu);
4935 void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4937 struct intel_shared_regs *pc;
4939 pc = cpuc->shared_regs;
4941 if (pc->core_id == -1 || --pc->refcnt == 0)
4942 kfree(pc);
4943 cpuc->shared_regs = NULL;
4946 free_excl_cntrs(cpuc);
4949 static void intel_pmu_cpu_dead(int cpu)
4951 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4953 intel_cpuc_finish(cpuc);
4955 if (is_hybrid() && cpuc->pmu)
4956 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
4959 static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
4962 intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
4963 intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
4966 static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
4967 struct perf_event_pmu_context *next_epc)
4969 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
4972 static int intel_pmu_check_period(struct perf_event *event, u64 value)
4974 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4977 static void intel_aux_output_init(void)
4979 /* See also intel_pmu_aux_output_match() */
4980 if (x86_pmu.intel_cap.pebs_output_pt_available)
4981 x86_pmu.assign = intel_pmu_assign_event;
4984 static int intel_pmu_aux_output_match(struct perf_event *event)
4986 /* intel_pmu_assign_event() is needed; see intel_aux_output_init() */
4987 if (!x86_pmu.intel_cap.pebs_output_pt_available)
4990 return is_intel_pt_event(event);
4993 static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
4995 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
4997 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
5000 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
5002 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
5004 PMU_FORMAT_ATTR(frontend, "config1:0-23");
5006 PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
5008 static struct attribute *intel_arch3_formats_attr[] = {
5009 &format_attr_event.attr,
5010 &format_attr_umask.attr,
5011 &format_attr_edge.attr,
5012 &format_attr_pc.attr,
5013 &format_attr_any.attr,
5014 &format_attr_inv.attr,
5015 &format_attr_cmask.attr,
5019 static struct attribute *hsw_format_attr[] = {
5020 &format_attr_in_tx.attr,
5021 &format_attr_in_tx_cp.attr,
5022 &format_attr_offcore_rsp.attr,
5023 &format_attr_ldlat.attr,
5027 static struct attribute *nhm_format_attr[] = {
5028 &format_attr_offcore_rsp.attr,
5029 &format_attr_ldlat.attr,
5033 static struct attribute *slm_format_attr[] = {
5034 &format_attr_offcore_rsp.attr,
5038 static struct attribute *cmt_format_attr[] = {
5039 &format_attr_offcore_rsp.attr,
5040 &format_attr_ldlat.attr,
5041 &format_attr_snoop_rsp.attr,
5045 static struct attribute *skl_format_attr[] = {
5046 &format_attr_frontend.attr,
5050 static __initconst const struct x86_pmu core_pmu = {
5052 .handle_irq = x86_pmu_handle_irq,
5053 .disable_all = x86_pmu_disable_all,
5054 .enable_all = core_pmu_enable_all,
5055 .enable = core_pmu_enable_event,
5056 .disable = x86_pmu_disable_event,
5057 .hw_config = core_pmu_hw_config,
5058 .schedule_events = x86_schedule_events,
5059 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
5060 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
5061 .event_map = intel_pmu_event_map,
5062 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
5064 .large_pebs_flags = LARGE_PEBS_FLAGS,
5067 * Intel PMCs cannot be accessed sanely above 32-bit width,
5068 * so we install an artificial 1<<31 period regardless of
5069 * the generic event period:
5071 .max_period = (1ULL<<31) - 1,
5072 .get_event_constraints = intel_get_event_constraints,
5073 .put_event_constraints = intel_put_event_constraints,
5074 .event_constraints = intel_core_event_constraints,
5075 .guest_get_msrs = core_guest_get_msrs,
5076 .format_attrs = intel_arch_formats_attr,
5077 .events_sysfs_show = intel_event_sysfs_show,
5080 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
5081 * together with PMU version 1 and thus be using core_pmu with
5082 * shared_regs. We need following callbacks here to allocate
5085 .cpu_prepare = intel_pmu_cpu_prepare,
5086 .cpu_starting = intel_pmu_cpu_starting,
5087 .cpu_dying = intel_pmu_cpu_dying,
5088 .cpu_dead = intel_pmu_cpu_dead,
5090 .check_period = intel_pmu_check_period,
5092 .lbr_reset = intel_pmu_lbr_reset_64,
5093 .lbr_read = intel_pmu_lbr_read_64,
5094 .lbr_save = intel_pmu_lbr_save,
5095 .lbr_restore = intel_pmu_lbr_restore,
5098 static __initconst const struct x86_pmu intel_pmu = {
5100 .handle_irq = intel_pmu_handle_irq,
5101 .disable_all = intel_pmu_disable_all,
5102 .enable_all = intel_pmu_enable_all,
5103 .enable = intel_pmu_enable_event,
5104 .disable = intel_pmu_disable_event,
5105 .add = intel_pmu_add_event,
5106 .del = intel_pmu_del_event,
5107 .read = intel_pmu_read_event,
5108 .set_period = intel_pmu_set_period,
5109 .update = intel_pmu_update,
5110 .hw_config = intel_pmu_hw_config,
5111 .schedule_events = x86_schedule_events,
5112 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
5113 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
5114 .event_map = intel_pmu_event_map,
5115 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
5117 .large_pebs_flags = LARGE_PEBS_FLAGS,
5119 * Intel PMCs cannot be accessed sanely above 32 bit width,
5120 * so we install an artificial 1<<31 period regardless of
5121 * the generic event period:
5123 .max_period = (1ULL << 31) - 1,
5124 .get_event_constraints = intel_get_event_constraints,
5125 .put_event_constraints = intel_put_event_constraints,
5126 .pebs_aliases = intel_pebs_aliases_core2,
5128 .format_attrs = intel_arch3_formats_attr,
5129 .events_sysfs_show = intel_event_sysfs_show,
5131 .cpu_prepare = intel_pmu_cpu_prepare,
5132 .cpu_starting = intel_pmu_cpu_starting,
5133 .cpu_dying = intel_pmu_cpu_dying,
5134 .cpu_dead = intel_pmu_cpu_dead,
5136 .guest_get_msrs = intel_guest_get_msrs,
5137 .sched_task = intel_pmu_sched_task,
5138 .swap_task_ctx = intel_pmu_swap_task_ctx,
5140 .check_period = intel_pmu_check_period,
5142 .aux_output_match = intel_pmu_aux_output_match,
5144 .lbr_reset = intel_pmu_lbr_reset_64,
5145 .lbr_read = intel_pmu_lbr_read_64,
5146 .lbr_save = intel_pmu_lbr_save,
5147 .lbr_restore = intel_pmu_lbr_restore,
5150 * SMM has access to all 4 rings and while traditionally SMM code only
5151 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
5153 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
5154 * between SMM or not, this results in what should be pure userspace
5155 * counters including SMM data.
5157 * This is a clear privilege issue, therefore globally disable
5158 * counting SMM by default.
5160 .attr_freeze_on_smi = 1,
5163 static __init void intel_clovertown_quirk(void)
5166 * PEBS is unreliable due to:
5168 * AJ67 - PEBS may experience CPL leaks
5169 * AJ68 - PEBS PMI may be delayed by one event
5170 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5171 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5173 * AJ67 could be worked around by restricting the OS/USR flags.
5174 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5176 * AJ106 could possibly be worked around by not allowing LBR
5177 * usage from PEBS, including the fixup.
5178 * AJ68 could possibly be worked around by always programming
5179 * a pebs_event_reset[0] value and coping with the lost events.
5181 * But taken together it might just make sense to not enable PEBS on
5182 * this CPU.
5184 pr_warn("PEBS disabled due to CPU errata\n");
5186 x86_pmu.pebs_constraints = NULL;
5189 static const struct x86_cpu_desc isolation_ucodes[] = {
5190 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
5191 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
5192 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
5193 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
5194 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
5195 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
5196 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
5197 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
5198 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
5199 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
5200 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
5201 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
5202 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
5203 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
5204 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
5205 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
5206 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
5207 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
5208 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
5209 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
5210 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
5211 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
5212 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
5213 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
5214 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
5215 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
5216 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
5217 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
5218 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
5222 static void intel_check_pebs_isolation(void)
5224 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
5227 static __init void intel_pebs_isolation_quirk(void)
5229 WARN_ON_ONCE(x86_pmu.check_microcode);
5230 x86_pmu.check_microcode = intel_check_pebs_isolation;
5231 intel_check_pebs_isolation();
5234 static const struct x86_cpu_desc pebs_ucodes[] = {
5235 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
5236 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
5237 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
5241 static bool intel_snb_pebs_broken(void)
5243 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
5246 static void intel_snb_check_microcode(void)
5248 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5252 * Serialized by the microcode lock.
5254 if (x86_pmu.pebs_broken) {
5255 pr_info("PEBS enabled due to microcode update\n");
5256 x86_pmu.pebs_broken = 0;
5258 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5259 x86_pmu.pebs_broken = 1;
5263 static bool is_lbr_from(unsigned long msr)
5265 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5267 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5271 * Under certain circumstances, accessing certain MSRs may cause a #GP.
5272 * This function tests whether the input MSR can be safely accessed.
5274 static bool check_msr(unsigned long msr, u64 mask)
5276 u64 val_old, val_new, val_tmp;
5279 * Disable the check for real HW, so we don't
5280 * mess with potentially enabled registers:
5282 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5286 * Read the current value, change it and read it back to see if it
5287 * matches, this is needed to detect certain hardware emulators
5288 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5290 if (rdmsrl_safe(msr, &val_old))
5294 * Only change the bits which can be updated by wrmsrl.
5296 val_tmp = val_old ^ mask;
5298 if (is_lbr_from(msr))
5299 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5301 if (wrmsrl_safe(msr, val_tmp) ||
5302 rdmsrl_safe(msr, &val_new))
5306 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
5307 * should equal rdmsrl()'s even with the quirk.
5309 if (val_new != val_tmp)
5312 if (is_lbr_from(msr))
5313 val_old = lbr_from_signext_quirk_wr(val_old);
5315 /* At this point it is certain that the MSR can be safely accessed.
5316 * Restore the old value and return.
5318 wrmsrl(msr, val_old);
5320 return true;
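/*
 * Usage sketch (illustrative, not part of the original source): the
 * extra_regs validation later in this file calls check_msr(er->msr, 0x11UL),
 * i.e. it flips bits 0 and 4, writes the value back and re-reads it. Real
 * hardware round-trips the change; an emulated MSR that ignores writes and
 * always returns 0 does not, and is then marked as not accessible.
 */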
5323 static __init void intel_sandybridge_quirk(void)
5325 x86_pmu.check_microcode = intel_snb_check_microcode;
5327 intel_snb_check_microcode();
5331 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5332 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5333 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5334 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5335 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5336 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5337 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5338 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5341 static __init void intel_arch_events_quirk(void)
5345 /* disable events that CPUID reports as not present */
5346 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5347 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5348 pr_warn("CPUID marked event: \'%s\' unavailable\n",
5349 intel_arch_events_map[bit].name);
5353 static __init void intel_nehalem_quirk(void)
5355 union cpuid10_ebx ebx;
5357 ebx.full = x86_pmu.events_maskl;
5358 if (ebx.split.no_branch_misses_retired) {
5360 * Erratum AAJ80 detected, we work it around by using
5361 * the BR_MISP_EXEC.ANY event. This will over-count
5362 * branch-misses, but it's still much better than the
5363 * architectural event which is often completely bogus:
5365 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5366 ebx.split.no_branch_misses_retired = 0;
5367 x86_pmu.events_maskl = ebx.full;
5368 pr_info("CPU erratum AAJ80 worked around\n");
5373 * enable software workaround for errata:
5378 * Only needed when HT is enabled. However, detecting
5379 * whether HT is enabled is difficult (model specific). So instead,
5380 * we enable the workaround during early boot, and verify whether
5381 * it is needed in a later initcall phase, once we have valid
5382 * topology information to check if HT is actually enabled.
5384 static __init void intel_ht_bug(void)
5386 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5388 x86_pmu.start_scheduling = intel_start_scheduling;
5389 x86_pmu.commit_scheduling = intel_commit_scheduling;
5390 x86_pmu.stop_scheduling = intel_stop_scheduling;
5393 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
5394 EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82");
5396 /* Haswell special events */
5397 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
5398 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
5399 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
5400 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
5401 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
5402 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
5403 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
5404 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
5405 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
5406 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
5407 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
5408 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
5410 static struct attribute *hsw_events_attrs[] = {
5411 EVENT_PTR(td_slots_issued),
5412 EVENT_PTR(td_slots_retired),
5413 EVENT_PTR(td_fetch_bubbles),
5414 EVENT_PTR(td_total_slots),
5415 EVENT_PTR(td_total_slots_scale),
5416 EVENT_PTR(td_recovery_bubbles),
5417 EVENT_PTR(td_recovery_bubbles_scale),
5421 static struct attribute *hsw_mem_events_attrs[] = {
5422 EVENT_PTR(mem_ld_hsw),
5423 EVENT_PTR(mem_st_hsw),
5427 static struct attribute *hsw_tsx_events_attrs[] = {
5428 EVENT_PTR(tx_start),
5429 EVENT_PTR(tx_commit),
5430 EVENT_PTR(tx_abort),
5431 EVENT_PTR(tx_capacity),
5432 EVENT_PTR(tx_conflict),
5433 EVENT_PTR(el_start),
5434 EVENT_PTR(el_commit),
5435 EVENT_PTR(el_abort),
5436 EVENT_PTR(el_capacity),
5437 EVENT_PTR(el_conflict),
5438 EVENT_PTR(cycles_t),
5439 EVENT_PTR(cycles_ct),
5443 EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
5444 EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5445 EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
5446 EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5448 static struct attribute *icl_events_attrs[] = {
5449 EVENT_PTR(mem_ld_hsw),
5450 EVENT_PTR(mem_st_hsw),
5454 static struct attribute *icl_td_events_attrs[] = {
5456 EVENT_PTR(td_retiring),
5457 EVENT_PTR(td_bad_spec),
5458 EVENT_PTR(td_fe_bound),
5459 EVENT_PTR(td_be_bound),
5463 static struct attribute *icl_tsx_events_attrs[] = {
5464 EVENT_PTR(tx_start),
5465 EVENT_PTR(tx_abort),
5466 EVENT_PTR(tx_commit),
5467 EVENT_PTR(tx_capacity_read),
5468 EVENT_PTR(tx_capacity_write),
5469 EVENT_PTR(tx_conflict),
5470 EVENT_PTR(el_start),
5471 EVENT_PTR(el_abort),
5472 EVENT_PTR(el_commit),
5473 EVENT_PTR(el_capacity_read),
5474 EVENT_PTR(el_capacity_write),
5475 EVENT_PTR(el_conflict),
5476 EVENT_PTR(cycles_t),
5477 EVENT_PTR(cycles_ct),
5482 EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5483 EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5485 static struct attribute *glc_events_attrs[] = {
5486 EVENT_PTR(mem_ld_hsw),
5487 EVENT_PTR(mem_st_spr),
5488 EVENT_PTR(mem_ld_aux),
5492 static struct attribute *glc_td_events_attrs[] = {
5494 EVENT_PTR(td_retiring),
5495 EVENT_PTR(td_bad_spec),
5496 EVENT_PTR(td_fe_bound),
5497 EVENT_PTR(td_be_bound),
5498 EVENT_PTR(td_heavy_ops),
5499 EVENT_PTR(td_br_mispredict),
5500 EVENT_PTR(td_fetch_lat),
5501 EVENT_PTR(td_mem_bound),
5505 static struct attribute *glc_tsx_events_attrs[] = {
5506 EVENT_PTR(tx_start),
5507 EVENT_PTR(tx_abort),
5508 EVENT_PTR(tx_commit),
5509 EVENT_PTR(tx_capacity_read),
5510 EVENT_PTR(tx_capacity_write),
5511 EVENT_PTR(tx_conflict),
5512 EVENT_PTR(cycles_t),
5513 EVENT_PTR(cycles_ct),
5517 static ssize_t freeze_on_smi_show(struct device *cdev,
5518 struct device_attribute *attr,
5521 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5524 static DEFINE_MUTEX(freeze_on_smi_mutex);
5526 static ssize_t freeze_on_smi_store(struct device *cdev,
5527 struct device_attribute *attr,
5528 const char *buf, size_t count)
5533 ret = kstrtoul(buf, 0, &val);
5540 mutex_lock(&freeze_on_smi_mutex);
5542 if (x86_pmu.attr_freeze_on_smi == val)
5545 x86_pmu.attr_freeze_on_smi = val;
5548 on_each_cpu(flip_smm_bit, &val, 1);
5551 mutex_unlock(&freeze_on_smi_mutex);
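/*
 * Usage sketch (illustrative, not part of the original source; the sysfs
 * path may differ, e.g. cpu_core/cpu_atom on hybrid parts):
 *
 *   echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi
 *
 * sets the FREEZE_IN_SMM bit in MSR_IA32_DEBUGCTLMSR on every CPU via
 * flip_smm_bit(), so the counters stop counting while in SMM.
 */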
5556 static void update_tfa_sched(void *ignored)
5558 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5561 * Check if PMC3 is used,
5562 * and if so, force schedule-out for all event types and all contexts.
5564 if (test_bit(3, cpuc->active_mask))
5565 perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5568 static ssize_t show_sysctl_tfa(struct device *cdev,
5569 struct device_attribute *attr,
5572 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5575 static ssize_t set_sysctl_tfa(struct device *cdev,
5576 struct device_attribute *attr,
5577 const char *buf, size_t count)
5582 ret = kstrtobool(buf, &val);
5587 if (val == allow_tsx_force_abort)
5590 allow_tsx_force_abort = val;
5593 on_each_cpu(update_tfa_sched, NULL, 1);
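/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *   echo 0 > /sys/bus/event_source/devices/cpu/allow_tsx_force_abort
 *
 * makes perf stop scheduling events on PMC3 (see
 * tfa_get_event_constraints()), so TSX is no longer force-aborted on
 * affected parts; update_tfa_sched() kicks any event already on PMC3.
 */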
5600 static DEVICE_ATTR_RW(freeze_on_smi);
5602 static ssize_t branches_show(struct device *cdev,
5603 struct device_attribute *attr,
5606 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5609 static DEVICE_ATTR_RO(branches);
5611 static ssize_t branch_counter_nr_show(struct device *cdev,
5612 struct device_attribute *attr,
5615 return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters));
5618 static DEVICE_ATTR_RO(branch_counter_nr);
5620 static ssize_t branch_counter_width_show(struct device *cdev,
5621 struct device_attribute *attr,
5624 return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS);
5627 static DEVICE_ATTR_RO(branch_counter_width);
5629 static struct attribute *lbr_attrs[] = {
5630 &dev_attr_branches.attr,
5631 &dev_attr_branch_counter_nr.attr,
5632 &dev_attr_branch_counter_width.attr,
5637 lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5641 return x86_pmu.lbr_nr ? attr->mode : 0;
5643 return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0;
5646 static char pmu_name_str[30];
5648 static ssize_t pmu_name_show(struct device *cdev,
5649 struct device_attribute *attr,
5652 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
5655 static DEVICE_ATTR_RO(pmu_name);
5657 static struct attribute *intel_pmu_caps_attrs[] = {
5658 &dev_attr_pmu_name.attr,
5662 static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5666 static struct attribute *intel_pmu_attrs[] = {
5667 &dev_attr_freeze_on_smi.attr,
5668 &dev_attr_allow_tsx_force_abort.attr,
5673 default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5675 if (attr == &dev_attr_allow_tsx_force_abort.attr)
5676 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5682 tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5684 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5688 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5690 return x86_pmu.pebs ? attr->mode : 0;
5694 mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5696 if (attr == &event_attr_mem_ld_aux.attr.attr)
5697 return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;
5699 return pebs_is_visible(kobj, attr, i);
5703 exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5705 return x86_pmu.version >= 2 ? attr->mode : 0;
5708 static struct attribute_group group_events_td = {
5712 static struct attribute_group group_events_mem = {
5714 .is_visible = mem_is_visible,
5717 static struct attribute_group group_events_tsx = {
5719 .is_visible = tsx_is_visible,
5722 static struct attribute_group group_caps_gen = {
5724 .attrs = intel_pmu_caps_attrs,
5727 static struct attribute_group group_caps_lbr = {
5730 .is_visible = lbr_is_visible,
5733 static struct attribute_group group_format_extra = {
5735 .is_visible = exra_is_visible,
5738 static struct attribute_group group_format_extra_skl = {
5740 .is_visible = exra_is_visible,
5743 static struct attribute_group group_default = {
5744 .attrs = intel_pmu_attrs,
5745 .is_visible = default_is_visible,
5748 static const struct attribute_group *attr_update[] = {
5754 &group_format_extra,
5755 &group_format_extra_skl,
5760 EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
5761 EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
5762 EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
5763 EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
5764 EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
5765 EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
5766 EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
5767 EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
5768 EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
5770 static struct attribute *adl_hybrid_events_attrs[] = {
5771 EVENT_PTR(slots_adl),
5772 EVENT_PTR(td_retiring_adl),
5773 EVENT_PTR(td_bad_spec_adl),
5774 EVENT_PTR(td_fe_bound_adl),
5775 EVENT_PTR(td_be_bound_adl),
5776 EVENT_PTR(td_heavy_ops_adl),
5777 EVENT_PTR(td_br_mis_adl),
5778 EVENT_PTR(td_fetch_lat_adl),
5779 EVENT_PTR(td_mem_bound_adl),
5783 /* Must be in IDX order */
5784 EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
5785 EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
5786 EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
5788 static struct attribute *adl_hybrid_mem_attrs[] = {
5789 EVENT_PTR(mem_ld_adl),
5790 EVENT_PTR(mem_st_adl),
5791 EVENT_PTR(mem_ld_aux_adl),
5795 static struct attribute *mtl_hybrid_mem_attrs[] = {
5796 EVENT_PTR(mem_ld_adl),
5797 EVENT_PTR(mem_st_adl),
5801 EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
5802 EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
5803 EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
5804 EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
5805 EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
5806 EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
5807 EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
5808 EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
5810 static struct attribute *adl_hybrid_tsx_attrs[] = {
5811 EVENT_PTR(tx_start_adl),
5812 EVENT_PTR(tx_abort_adl),
5813 EVENT_PTR(tx_commit_adl),
5814 EVENT_PTR(tx_capacity_read_adl),
5815 EVENT_PTR(tx_capacity_write_adl),
5816 EVENT_PTR(tx_conflict_adl),
5817 EVENT_PTR(cycles_t_adl),
5818 EVENT_PTR(cycles_ct_adl),
5822 FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
5823 FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
5824 FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
5825 FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
5826 FORMAT_ATTR_HYBRID(frontend, hybrid_big);
5828 #define ADL_HYBRID_RTM_FORMAT_ATTR \
5829 FORMAT_HYBRID_PTR(in_tx), \
5830 FORMAT_HYBRID_PTR(in_tx_cp)
5832 #define ADL_HYBRID_FORMAT_ATTR \
5833 FORMAT_HYBRID_PTR(offcore_rsp), \
5834 FORMAT_HYBRID_PTR(ldlat), \
5835 FORMAT_HYBRID_PTR(frontend)
5837 static struct attribute *adl_hybrid_extra_attr_rtm[] = {
5838 ADL_HYBRID_RTM_FORMAT_ATTR,
5839 ADL_HYBRID_FORMAT_ATTR,
5843 static struct attribute *adl_hybrid_extra_attr[] = {
5844 ADL_HYBRID_FORMAT_ATTR,
5848 FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);
5850 static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
5851 ADL_HYBRID_RTM_FORMAT_ATTR,
5852 ADL_HYBRID_FORMAT_ATTR,
5853 FORMAT_HYBRID_PTR(snoop_rsp),
5857 static struct attribute *mtl_hybrid_extra_attr[] = {
5858 ADL_HYBRID_FORMAT_ATTR,
5859 FORMAT_HYBRID_PTR(snoop_rsp),
5863 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
5865 struct device *dev = kobj_to_dev(kobj);
5866 struct x86_hybrid_pmu *pmu =
5867 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5868 struct perf_pmu_events_hybrid_attr *pmu_attr =
5869 container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
5871 return pmu->pmu_type & pmu_attr->pmu_type;
5874 static umode_t hybrid_events_is_visible(struct kobject *kobj,
5875 struct attribute *attr, int i)
5877 return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
5880 static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
5882 int cpu = cpumask_first(&pmu->supported_cpus);
5884 return (cpu >= nr_cpu_ids) ? -1 : cpu;
5887 static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
5888 struct attribute *attr, int i)
5890 struct device *dev = kobj_to_dev(kobj);
5891 struct x86_hybrid_pmu *pmu =
5892 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5893 int cpu = hybrid_find_supported_cpu(pmu);
5895 return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
5898 static umode_t hybrid_format_is_visible(struct kobject *kobj,
5899 struct attribute *attr, int i)
5901 struct device *dev = kobj_to_dev(kobj);
5902 struct x86_hybrid_pmu *pmu =
5903 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5904 struct perf_pmu_format_hybrid_attr *pmu_attr =
5905 container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
5906 int cpu = hybrid_find_supported_cpu(pmu);
5908 return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0;
5911 static struct attribute_group hybrid_group_events_td = {
5913 .is_visible = hybrid_events_is_visible,
5916 static struct attribute_group hybrid_group_events_mem = {
5918 .is_visible = hybrid_events_is_visible,
5921 static struct attribute_group hybrid_group_events_tsx = {
5923 .is_visible = hybrid_tsx_is_visible,
5926 static struct attribute_group hybrid_group_format_extra = {
5928 .is_visible = hybrid_format_is_visible,
5931 static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
5932 struct device_attribute *attr,
5935 struct x86_hybrid_pmu *pmu =
5936 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5938 return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
5941 static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
5942 static struct attribute *intel_hybrid_cpus_attrs[] = {
5943 &dev_attr_cpus.attr,
5947 static struct attribute_group hybrid_group_cpus = {
5948 .attrs = intel_hybrid_cpus_attrs,
5951 static const struct attribute_group *hybrid_attr_update[] = {
5952 &hybrid_group_events_td,
5953 &hybrid_group_events_mem,
5954 &hybrid_group_events_tsx,
5957 &hybrid_group_format_extra,
5963 static struct attribute *empty_attrs;
5965 static void intel_pmu_check_num_counters(int *num_counters,
5966 int *num_counters_fixed,
5967 u64 *intel_ctrl, u64 fixed_mask)
5969 if (*num_counters > INTEL_PMC_MAX_GENERIC) {
5970 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5971 *num_counters, INTEL_PMC_MAX_GENERIC);
5972 *num_counters = INTEL_PMC_MAX_GENERIC;
5974 *intel_ctrl = (1ULL << *num_counters) - 1;
5976 if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5977 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5978 *num_counters_fixed, INTEL_PMC_MAX_FIXED);
5979 *num_counters_fixed = INTEL_PMC_MAX_FIXED;
5982 *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
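/*
 * Worked example (illustrative, not part of the original source): with 8
 * generic and 4 fixed counters the code above produces
 *
 *   *intel_ctrl = ((1ULL << 8) - 1) | (0xfULL << INTEL_PMC_IDX_FIXED)
 *               = 0x0000000f000000ffULL
 *
 * i.e. GLOBAL_CTRL enable bits 0-7 for the GP counters and 32-35 for the
 * fixed counters.
 */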
5985 static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5987 int num_counters_fixed,
5990 struct event_constraint *c;
5992 if (!event_constraints)
5996 * event on fixed counter2 (REF_CYCLES) only works on this
5997 * counter, so do not extend mask to generic counters
5999 for_each_event_constraint(c, event_constraints) {
6001 * Don't extend the topdown slots and metrics
6002 * events to the generic counters.
6004 if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
6006 * Disable topdown slots and metrics events,
6007 * if slots event is not in CPUID.
6009 if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
6010 c->idxmsk64 = 0;
6011 c->weight = hweight64(c->idxmsk64);
6015 if (c->cmask == FIXED_EVENT_FLAGS) {
6016 /* Disable fixed counters which are not in CPUID */
6017 c->idxmsk64 &= intel_ctrl;
6020 * Don't extend the pseudo-encoding to the
6021 * generic counters.
6023 if (!use_fixed_pseudo_encoding(c->code))
6024 c->idxmsk64 |= (1ULL << num_counters) - 1;
6026 c->idxmsk64 &=
6027 ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
6028 c->weight = hweight64(c->idxmsk64);
6032 static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
6034 struct extra_reg *er;
6037 * Accessing an extra MSR may cause #GP under certain circumstances.
6038 * E.g. KVM doesn't support offcore events.
6039 * Check all extra_regs here.
6044 for (er = extra_regs; er->msr; er++) {
6045 er->extra_msr_access = check_msr(er->msr, 0x11UL);
6046 /* Disable LBR select mapping */
6047 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
6048 x86_pmu.lbr_sel_map = NULL;
6052 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
6053 { hybrid_small, "cpu_atom" },
6054 { hybrid_big, "cpu_core" },
6057 static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
6059 unsigned long pmus_mask = pmus;
6060 struct x86_hybrid_pmu *pmu;
6063 x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask);
6064 x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus,
6065 sizeof(struct x86_hybrid_pmu),
6067 if (!x86_pmu.hybrid_pmu)
6070 static_branch_enable(&perf_is_hybrid);
6071 x86_pmu.filter = intel_pmu_filter;
6073 for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) {
6074 pmu = &x86_pmu.hybrid_pmu[idx++];
6075 pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id;
6076 pmu->name = intel_hybrid_pmu_type_map[bit].name;
6078 pmu->num_counters = x86_pmu.num_counters;
6079 pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6080 pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
6081 pmu->unconstrained = (struct event_constraint)
6082 __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
6083 0, pmu->num_counters, 0, 0);
6085 pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6086 if (pmu->pmu_type & hybrid_small) {
6087 pmu->intel_cap.perf_metrics = 0;
6088 pmu->intel_cap.pebs_output_pt_available = 1;
6089 pmu->mid_ack = true;
6090 } else if (pmu->pmu_type & hybrid_big) {
6091 pmu->intel_cap.perf_metrics = 1;
6092 pmu->intel_cap.pebs_output_pt_available = 0;
6093 pmu->late_ack = true;
6100 static __always_inline void intel_pmu_ref_cycles_ext(void)
6102 if (!(x86_pmu.events_maskl & (INTEL_PMC_MSK_FIXED_REF_CYCLES >> INTEL_PMC_IDX_FIXED)))
6103 intel_perfmon_event_map[PERF_COUNT_HW_REF_CPU_CYCLES] = 0x013c;
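/*
 * Reading aid (not part of the original source): when the architectural
 * UnHalted Reference Cycles event is enumerated as available, the
 * fixed-counter-2-only pseudo-encoding 0x0300 is replaced with the real
 * 0x013c encoding, which can also be scheduled on generic counters.
 */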
6106 static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
6108 x86_pmu.late_ack = true;
6109 x86_pmu.limit_period = glc_limit_period;
6110 x86_pmu.pebs_aliases = NULL;
6111 x86_pmu.pebs_prec_dist = true;
6112 x86_pmu.pebs_block = true;
6113 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6114 x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6115 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6116 x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6117 x86_pmu.lbr_pt_coexist = true;
6118 x86_pmu.num_topdown_events = 8;
6119 static_call_update(intel_pmu_update_topdown_event,
6120 &icl_update_topdown_event);
6121 static_call_update(intel_pmu_set_topdown_event_period,
6122 &icl_set_topdown_event_period);
6124 memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6125 memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6126 hybrid(pmu, event_constraints) = intel_glc_event_constraints;
6127 hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
6129 intel_pmu_ref_cycles_ext();
6132 static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
6134 x86_pmu.mid_ack = true;
6135 x86_pmu.limit_period = glc_limit_period;
6136 x86_pmu.pebs_aliases = NULL;
6137 x86_pmu.pebs_prec_dist = true;
6138 x86_pmu.pebs_block = true;
6139 x86_pmu.lbr_pt_coexist = true;
6140 x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6141 x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6143 memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6144 memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6145 hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6146 hybrid(pmu, event_constraints) = intel_grt_event_constraints;
6147 hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
6148 hybrid(pmu, extra_regs) = intel_grt_extra_regs;
6150 intel_pmu_ref_cycles_ext();
6153 __init int intel_pmu_init(void)
6155 struct attribute **extra_skl_attr = &empty_attrs;
6156 struct attribute **extra_attr = &empty_attrs;
6157 struct attribute **td_attr = &empty_attrs;
6158 struct attribute **mem_attr = &empty_attrs;
6159 struct attribute **tsx_attr = &empty_attrs;
6160 union cpuid10_edx edx;
6161 union cpuid10_eax eax;
6162 union cpuid10_ebx ebx;
6163 unsigned int fixed_mask;
6167 struct x86_hybrid_pmu *pmu;
6169 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
6170 switch (boot_cpu_data.x86) {
6171 case 0x6:
6172 return p6_pmu_init();
6173 case 0xb:
6174 return knc_pmu_init();
6175 case 0xf:
6176 return p4_pmu_init();
6182 * Check whether the Architectural PerfMon supports
6183 * Branch Misses Retired hw_event or not.
6185 cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
6186 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
6189 version = eax.split.version_id;
6193 x86_pmu = intel_pmu;
6195 x86_pmu.version = version;
6196 x86_pmu.num_counters = eax.split.num_counters;
6197 x86_pmu.cntval_bits = eax.split.bit_width;
6198 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
6200 x86_pmu.events_maskl = ebx.full;
6201 x86_pmu.events_mask_len = eax.split.mask_length;
6203 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
6204 x86_pmu.pebs_capable = PEBS_COUNTER_MASK;
6207 * Quirk: v2 perfmon does not report fixed-purpose events, so
6208 * assume at least 3 events, when not running in a hypervisor:
6210 if (version > 1 && version < 5) {
6211 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
6213 x86_pmu.num_counters_fixed =
6214 max((int)edx.split.num_counters_fixed, assume);
6216 fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
6217 } else if (version >= 5)
6218 x86_pmu.num_counters_fixed = fls(fixed_mask);
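/*
 * Illustrative note (not part of the original source): on version 5 and
 * later, the supported fixed counters come from the CPUID.0xA ECX bitmap
 * read above, e.g. fixed_mask == 0x7 yields fls(0x7) == 3 fixed counters.
 */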
6220 if (boot_cpu_has(X86_FEATURE_PDCM)) {
6223 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
6224 x86_pmu.intel_cap.capabilities = capabilities;
6227 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
6228 x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
6229 x86_pmu.lbr_read = intel_pmu_lbr_read_32;
6232 if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
6233 intel_pmu_arch_lbr_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	if (version >= 5) {
		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
		if (x86_pmu.intel_cap.anythread_deprecated)
			pr_cont(" AnyThread deprecated, ");
	}

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_CORE_YONAH:
		pr_cont("Core events, ");
		name = "core";
		break;

	case INTEL_FAM6_CORE2_MEROM:
		x86_add_quirk(intel_clovertown_quirk);
		fallthrough;

	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		name = "core2";
		break;

	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;
		x86_pmu.limit_period = nhm_limit_period;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
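
		/*
		 * With .inv=1 and .cmask=1 the counter increments on cycles
		 * in which fewer than one uop is issued (or executed), i.e.
		 * stall cycles; that is how the generic stalled-cycles
		 * events are synthesized from these raw events.
		 */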

		intel_pmu_pebs_data_source_nhm();
		x86_add_quirk(intel_nehalem_quirk);
		x86_pmu.pebs_no_tlb = 1;
		extra_attr = nhm_format_attr;

		pr_cont("Nehalem events, ");
		name = "nehalem";
		break;

	case INTEL_FAM6_ATOM_BONNELL:
	case INTEL_FAM6_ATOM_BONNELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL:
	case INTEL_FAM6_ATOM_SALTWELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
		pr_cont("Atom events, ");
		name = "bonnell";
		break;

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_D:
	case INTEL_FAM6_ATOM_SILVERMONT_MID:
	case INTEL_FAM6_ATOM_AIRMONT:
	case INTEL_FAM6_ATOM_AIRMONT_MID:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_slm();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = slm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Silvermont events, ");
		name = "silvermont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.pebs_capable = ~0ULL;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
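		/*
		 * Goldmont Plus extends PEBS to every general-purpose
		 * counter, hence the all-ones pebs_capable mask and
		 * PMU_FL_PEBS_ALL above.
		 */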
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		td_attr = glm_events_attrs;
		/* Goldmont Plus has a 4-wide pipeline */
		event_attr_td_total_slots_scale_glm.event_str = "4";
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;

	case INTEL_FAM6_ATOM_TREMONT_D:
	case INTEL_FAM6_ATOM_TREMONT:
	case INTEL_FAM6_ATOM_TREMONT_L:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_tnt_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Tremont events, ");
		name = "tremont";
		break;

	case INTEL_FAM6_ATOM_GRACEMONT:
		intel_pmu_init_grt(NULL);
		intel_pmu_pebs_data_source_grt();
		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = nhm_format_attr;
		pr_cont("Gracemont events, ");
		name = "gracemont";
		break;

	case INTEL_FAM6_ATOM_CRESTMONT:
	case INTEL_FAM6_ATOM_CRESTMONT_X:
		intel_pmu_init_grt(NULL);
		x86_pmu.extra_regs = intel_cmt_extra_regs;
		intel_pmu_pebs_data_source_cmt();
		x86_pmu.pebs_latency_data = mtl_latency_data_small;
		x86_pmu.get_event_constraints = cmt_get_event_constraints;
		td_attr = cmt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = cmt_format_attr;
		pr_cont("Crestmont events, ");
		name = "crestmont";
		break;

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
		pr_cont("Westmere events, ");
		name = "westmere";
		break;

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("SandyBridge events, ");
		name = "sandybridge";
		break;

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("IvyBridge events, ");
		name = "ivybridge";
		break;

	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
		x86_add_quirk(intel_ht_bug);
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.lbr_double_abort = true;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Haswell events, ");
		name = "haswell";
		break;

	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
									 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
									  HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.limit_period = bdw_limit_period;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Broadwell events, ");
		name = "broadwell";
		break;

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,
		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_knl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_knl_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		extra_attr = slm_format_attr;
		pr_cont("Knights Landing/Mill events, ");
		name = "knights-landing";
		break;

	case INTEL_FAM6_SKYLAKE_X:
		pmem = true;
		fallthrough;
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
	case INTEL_FAM6_COMETLAKE_L:
	case INTEL_FAM6_COMETLAKE:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
		event_attr_td_recovery_bubbles.event_str_noht =
			"event=0xd,umask=0x1,cmask=1";
		event_attr_td_recovery_bubbles.event_str_ht =
			"event=0xd,umask=0x1,cmask=1,any=1";

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(pmem);

		/*
		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
		 * TSX force abort hooks are not required on these systems. Only deploy
		 * the workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
		 */
		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
		    !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
		}
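
		/*
		 * With PMU_FL_TFA the constraint scheduler keeps PMC3 idle
		 * unless it is really needed; when PMC3 is claimed, the TFA
		 * hooks above force RTM transactions to abort so that the
		 * counter is not corrupted by the erratum.
		 */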

		pr_cont("Skylake events, ");
		name = "skylake";
		break;

	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
		x86_pmu.pebs_ept = 1;
		pmem = true;
		fallthrough;
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_ROCKETLAKE:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_icl_event_constraints;
		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_icl_extra_regs;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = icl_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = icl_events_attrs;
		td_attr = icl_td_events_attrs;
		tsx_attr = icl_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		x86_pmu.num_topdown_events = 4;
		static_call_update(intel_pmu_update_topdown_event,
				   &icl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &icl_set_topdown_event_period);
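
		/*
		 * Ice Lake's PERF_METRICS MSR reports four topdown metrics
		 * (retiring, bad speculation, frontend bound and backend
		 * bound), hence num_topdown_events == 4 and the Ice Lake
		 * specific handlers installed above.
		 */
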
		pr_cont("Icelake events, ");
		name = "icelake";
		break;

	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_EMERALDRAPIDS_X:
		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		x86_pmu.extra_regs = intel_glc_extra_regs;
		fallthrough;
	case INTEL_FAM6_GRANITERAPIDS_X:
	case INTEL_FAM6_GRANITERAPIDS_D:
		intel_pmu_init_glc(NULL);
		if (!x86_pmu.extra_regs)
			x86_pmu.extra_regs = intel_rwc_extra_regs;
		x86_pmu.pebs_ept = 1;
		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = glc_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = glc_events_attrs;
		td_attr = glc_td_events_attrs;
		tsx_attr = glc_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(true);
		pr_cont("Sapphire Rapids events, ");
		name = "sapphire_rapids";
		break;

	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_RAPTORLAKE:
	case INTEL_FAM6_RAPTORLAKE_P:
	case INTEL_FAM6_RAPTORLAKE_S:
		/*
		 * Alder Lake has 2 types of CPU cores, Core and Atom.
		 *
		 * Initialize the common PerfMon capabilities here.
		 */
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.get_event_constraints = adl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;
		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;

		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_glc(&pmu->pmu);
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->num_counters = x86_pmu.num_counters + 2;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
		} else {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}
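
		/*
		 * On hybrid parts, CPUID leaf 0xA enumerates only the
		 * counter capabilities common to both core types; the
		 * P-cores actually have two more general-purpose counters
		 * and one more fixed counter than that common subset.
		 */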

		/*
		 * Quirk: For some Alder Lake machines, when all E-cores are
		 * disabled in the BIOS, leaf 0xA enumerates all counters of
		 * the P-cores. However, X86_FEATURE_HYBRID_CPU is still set.
		 * The code above would then mistakenly add extra counters
		 * for the P-cores. Correct the number of counters here.
		 */
		if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);
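
		/*
		 * The "unconstrained" constraint simply admits an event on
		 * any general-purpose counter: e.g. num_counters == 8
		 * yields a counter mask of 0xff with weight 8.
		 */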
		pmu->extra_regs = intel_glc_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_grt(&pmu->pmu);

		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		intel_pmu_pebs_data_source_adl();
		pr_cont("Alderlake Hybrid events, ");
		name = "alderlake_hybrid";
		break;

	case INTEL_FAM6_METEORLAKE:
	case INTEL_FAM6_METEORLAKE_L:
		intel_pmu_init_hybrid(hybrid_big_small);

		x86_pmu.pebs_latency_data = mtl_latency_data_small;
		x86_pmu.get_event_constraints = mtl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;

		td_attr = adl_hybrid_events_attrs;
		mem_attr = mtl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_glc(&pmu->pmu);
		pmu->extra_regs = intel_rwc_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_grt(&pmu->pmu);
		pmu->extra_regs = intel_cmt_extra_regs;

		intel_pmu_pebs_data_source_mtl();
		pr_cont("Meteorlake Hybrid events, ");
		name = "meteorlake_hybrid";
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		case 2:
		case 3:
		case 4:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		default:
			/*
			 * The default constraints for v5 and up can support up to
			 * 16 fixed counters. For the fixed counters 4 and later,
			 * the pseudo-encoding is applied.
			 * The constraints may be cut according to the CPUID enumeration
			 * by inserting the EVENT_CONSTRAINT_END.
			 */
			if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
				x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
			intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
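			/*
			 * E.g. with num_counters_fixed == 4, entry 4 of the
			 * table becomes the end marker and only the
			 * constraints for fixed counters 0-3 stay visible.
			 */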
			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v5+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}

	intel_pmu_check_num_counters(&x86_pmu.num_counters,
				     &x86_pmu.num_counters_fixed,
				     &x86_pmu.intel_ctrl,
				     (u64)fixed_mask);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
					  x86_pmu.num_counters,
					  x86_pmu.num_counters_fixed,
					  x86_pmu.intel_ctrl);

	/*
	 * Accessing an LBR MSR may cause a #GP under certain circumstances.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR cannot be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr) {
		intel_pmu_lbr_init();

		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}
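
	/*
	 * With full-width writes, max_period for 48-bit counters becomes
	 * (2^48 - 1) >> 1 == 2^47 - 1, instead of the 31-bit limit imposed
	 * by the legacy counter MSRs' sign-extended 32-bit writes.
	 */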

	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

	if (x86_pmu.intel_cap.pebs_timing_info)
		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;

	intel_aux_output_init();

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled.
 * If HT is off, then we disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}

subsys_initcall(fixup_ht_bug)