// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC 0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC 0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC 0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC 0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC 0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC 0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC 0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC 0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC 0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC 0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC 0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC 0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC 0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC 0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC 0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC 0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC 0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC 0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC 0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC 0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC 0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC 0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC 0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC 0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC 0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC 0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC 0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC 0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC 0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC 0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC 0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC 0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC 0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC 0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC 0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC 0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC 0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC 0x4670

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
#define SNB_UNC_CTL_EDGE_DET (1 << 18)
#define SNB_UNC_CTL_EN (1 << 22)
#define SNB_UNC_CTL_INVERT (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
#define NHM_UNC_CTL_CMASK_MASK 0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
				SNB_UNC_CTL_UMASK_MASK | \
				SNB_UNC_CTL_EDGE_DET | \
				SNB_UNC_CTL_INVERT | \
				SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
				SNB_UNC_CTL_UMASK_MASK | \
				SNB_UNC_CTL_EDGE_DET | \
				SNB_UNC_CTL_INVERT | \
				NHM_UNC_CTL_CMASK_MASK)

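/*
 * Example decoding under the masks above: a raw config of 0x2000180
 * selects event 0x80, umask 0x01 and a counter-mask of 2 (cmask lives
 * in bits 24-28 on SNB). NHM differs only in the width of the cmask
 * field (bits 24-31), which is why it needs its own raw event mask.
 */
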
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL 0x391
#define SNB_UNC_FIXED_CTR_CTRL 0x394
#define SNB_UNC_FIXED_CTR 0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
#define SNB_UNC_CBO_0_PER_CTR0 0x706
#define SNB_UNC_CBO_MSR_OFFSET 0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0 0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET 0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL 0x391
#define NHM_UNC_FIXED_CTR 0x394
#define NHM_UNC_FIXED_CTR_CTRL 0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0 0x3c0
#define NHM_UNC_UNCORE_PMC0 0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG 0x396
#define ICL_UNC_NUM_CBO_MASK 0xf
#define ICL_UNC_CBO_0_PER_CTR0 0x702
#define ICL_UNC_CBO_MSR_OFFSET 0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR 0x3b1
#define ICL_UNC_ARB_PERFEVTSEL 0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL 0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL 0x2fde
#define ADL_UNC_FIXED_CTR 0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0 0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0 0x2000
#define ADL_UNC_CTL_THRESHOLD 0x3f000000
#define ADL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
				SNB_UNC_CTL_UMASK_MASK | \
				SNB_UNC_CTL_EDGE_DET | \
				SNB_UNC_CTL_INVERT | \
				ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0 0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0 0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET 0x8

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");

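/*
 * The format attributes above are exported through sysfs (under
 * /sys/bus/event_source/devices/<pmu>/format/) so that tools can
 * encode named fields into the raw config, e.g. (event/umask values
 * illustrative only):
 *	perf stat -e 'uncore_cbox_0/event=0x80,umask=0x01/'
 */
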
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box = snb_uncore_msr_init_box,
	.enable_box = snb_uncore_msr_enable_box,
	.exit_box = snb_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name = "cbox",
	.num_counters = 2,
	.num_boxes = 4,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr = SNB_UNC_FIXED_CTR,
	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
	.ops = &snb_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
	.event_descs = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name = "arb",
	.num_counters = 2,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.perf_ctr = SNB_UNC_ARB_PER_CTR0,
	.event_ctl = SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_ARB_MSR_OFFSET,
	.constraints = snb_uncore_arb_constraints,
	.ops = &snb_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

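/* Each physical core has its own CBo, so never expose more boxes than cores. */
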
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box = skl_uncore_msr_init_box,
	.enable_box = skl_uncore_msr_enable_box,
	.exit_box = skl_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name = "cbox",
	.num_counters = 4,
	.num_boxes = 8,
	.perf_ctr_bits = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr = SNB_UNC_FIXED_CTR,
	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = SNB_UNC_CBO_MSR_OFFSET,
	.ops = &skl_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
	.event_descs = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
	.name = "cbox",
	.num_counters = 2,
	.perf_ctr_bits = 44,
	.perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = ICL_UNC_CBO_MSR_OFFSET,
	.ops = &icl_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
	.name = "clock",
	.num_counters = 1,
	.num_boxes = 1,
	.fixed_ctr_bits = 48,
	.fixed_ctr = SNB_UNC_FIXED_CTR,
	.fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_CTL_EV_SEL_MASK,
	.format_group = &icl_uncore_clock_format_group,
	.ops = &icl_uncore_msr_ops,
	.event_descs = icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
	.name = "arb",
	.num_counters = 1,
	.num_boxes = 1,
	.perf_ctr_bits = 44,
	.perf_ctr = ICL_UNC_ARB_PER_CTR,
	.event_ctl = ICL_UNC_ARB_PERFEVTSEL,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.ops = &icl_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

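/*
 * ICL and later client parts report the number of CBo units in the low
 * bits of the ICL_UNC_CBO_CONFIG MSR, so the box count is probed at
 * boot instead of being hard-coded per model.
 */
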
static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}

static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box = adl_uncore_msr_init_box,
	.enable_box = adl_uncore_msr_enable_box,
	.disable_box = adl_uncore_msr_disable_box,
	.exit_box = adl_uncore_msr_exit_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = snb_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name = "format",
	.attrs = adl_uncore_formats_attr,
};

static struct intel_uncore_type adl_uncore_cbox = {
	.name = "cbox",
	.num_counters = 2,
	.perf_ctr_bits = 44,
	.perf_ctr = ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl = ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask = ADL_UNC_RAW_EVENT_MASK,
	.msr_offset = ICL_UNC_CBO_MSR_OFFSET,
	.ops = &adl_uncore_msr_ops,
	.format_group = &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
	.name = "arb",
	.num_counters = 2,
	.num_boxes = 2,
	.perf_ctr_bits = 44,
	.perf_ctr = ADL_UNC_ARB_PER_CTR0,
	.event_ctl = ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset = ADL_UNC_ARB_MSR_OFFSET,
	.constraints = snb_uncore_arb_constraints,
	.ops = &adl_uncore_msr_ops,
	.format_group = &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
	.name = "clock",
	.num_counters = 1,
	.num_boxes = 1,
	.fixed_ctr_bits = 48,
	.fixed_ctr = ADL_UNC_FIXED_CTR,
	.fixed_ctl = ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed = 1,
	.event_mask = SNB_UNC_CTL_EV_SEL_MASK,
	.format_group = &icl_uncore_clock_format_group,
	.ops = &adl_uncore_msr_ops,
	.event_descs = icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};

void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

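/*
 * The IMC free-running counters count 64-byte cache lines, so the
 * advertised scale of 6.103515625e-5 below is 64 / 2^20, i.e. the
 * factor that converts raw counts to MiB.
 */
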
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW break down- legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS = 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

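/*
 * Each freerunning_counters entry below is laid out as:
 * { counter_base, counter_offset, box_offset, num_counters, bits }
 */
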
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
						0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
						0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
						0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
						0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
						0x0, 0x0, 1, 32 },
};

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

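/*
 * The IMC counters are free running and cannot be started or stopped,
 * so the box and event enable/disable callbacks below are empty stubs.
 */
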
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}

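/*
 * Worked example for the conversion above: cfg = 0x2 (data_writes)
 * yields ((0x2 - 1) << 8) | 0x10ff = 0x11ff, i.e. event 0xff with the
 * free-running umask base 0x10 plus the zero-based counter index.
 */
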
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr = perf_invalid_context,
	.event_init = snb_uncore_imc_event_init,
	.add = uncore_pmu_event_add,
	.del = uncore_pmu_event_del,
	.start = uncore_pmu_event_start,
	.stop = uncore_pmu_event_stop,
	.read = uncore_pmu_event_read,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box = snb_uncore_imc_init_box,
	.exit_box = uncore_mmio_exit_box,
	.enable_box = snb_uncore_imc_enable_box,
	.disable_box = snb_uncore_imc_disable_box,
	.disable_event = snb_uncore_imc_disable_event,
	.enable_event = snb_uncore_imc_enable_event,
	.hw_config = snb_uncore_imc_hw_config,
	.read_counter = uncore_mmio_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name = "imc",
	.num_counters = 5,
	.num_boxes = 1,
	.num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning = snb_uncore_imc_freerunning,
	.event_descs = snb_uncore_imc_events,
	.format_group = &snb_uncore_imc_format_group,
	.ops = &snb_uncore_imc_ops,
	.pmu = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_H3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CML_S5_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RKL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name = "snb_uncore",
	.id_table = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name = "ivb_uncore",
	.id_table = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name = "hsw_uncore",
	.id_table = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name = "bdw_uncore",
	.id_table = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name = "skl_uncore",
	.id_table = skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name = "icl_uncore",
	.id_table = icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver), /* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{ /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box = nhm_uncore_msr_disable_box,
	.enable_box = nhm_uncore_msr_enable_box,
	.disable_event = snb_uncore_msr_disable_event,
	.enable_event = nhm_uncore_msr_enable_event,
	.read_counter = uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name = "",
	.num_counters = 8,
	.num_boxes = 1,
	.perf_ctr_bits = 48,
	.fixed_ctr_bits = 48,
	.event_ctl = NHM_UNC_PERFEVTSEL0,
	.perf_ctr = NHM_UNC_UNCORE_PMC0,
	.fixed_ctr = NHM_UNC_FIXED_CTR,
	.fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
	.event_mask = NHM_UNC_RAW_EVENT_MASK,
	.event_descs = nhm_uncore_events,
	.ops = &nhm_uncore_msr_ops,
	.format_group = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */

/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_4_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_5_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_6_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_7_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_8_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_9_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_10_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_11_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_12_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_13_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_14_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_15_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ }
};

enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ] = { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE] = { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ] = { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE] = { 0xd8A0, 0x0, 0x0, 1, 64 },
};

static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total, "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read, "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write, "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit, "MiB"),

	{ /* end: all zeroes */ }
};

static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
	const struct pci_device_id *ids = tgl_uncore_pci_ids;
	struct pci_dev *mc_dev = NULL;

	while (ids && ids->vendor) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
		if (mc_dev)
			return mc_dev;
		ids++;
	}
	return mc_dev;
}

#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000

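/*
 * Each IMC instance sits TGL_UNCORE_MMIO_IMC_MEM_OFFSET (64KiB) apart
 * inside the MCHBAR window; __uncore_imc_init_box() below scales the
 * offset by the PMU index to reach the right instance.
 */
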
static void __uncore_imc_init_box(struct intel_uncore_box *box,
				  unsigned int base_offset)
{
	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
	struct intel_uncore_pmu *pmu = box->pmu;
	struct intel_uncore_type *type = pmu->type;
	resource_size_t addr;
	u32 mch_bar;

	if (!pdev) {
		pr_warn("perf uncore: Cannot find matched IMC device.\n");
		return;
	}

	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
	/* MCHBAR is disabled */
	if (!(mch_bar & BIT(0))) {
		pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
		return;
	}
	mch_bar &= ~BIT(0);
	addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
	addr |= ((resource_size_t)mch_bar << 32);
#endif

	addr += base_offset;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}

static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box = tgl_uncore_imc_freerunning_init_box,
	.exit_box = uncore_mmio_exit_box,
	.read_counter = uncore_mmio_read_counter,
	.hw_config = uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name = "imc_free_running",
	.num_counters = 3,
	.num_boxes = 2,
	.num_freerunning_types = TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size = TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning = tgl_uncore_imc_freerunning,
	.ops = &tgl_uncore_imc_freerunning_ops,
	.event_descs = tgl_uncore_imc_events,
	.format_group = &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL,
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE 0xd900
#define ADL_UNCORE_IMC_MAP_SIZE 0x200
#define ADL_UNCORE_IMC_CTR 0xe8
#define ADL_UNCORE_IMC_CTRL 0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL 0xc0
#define ADL_UNCORE_IMC_BOX_CTL 0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE 0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE 0x100

#define ADL_UNCORE_IMC_CTL_FRZ (1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL (1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS (1 << 2)
#define ADL_UNCORE_IMC_CTL_INT (ADL_UNCORE_IMC_CTL_RST_CTRL | \
				ADL_UNCORE_IMC_CTL_RST_CTRS)

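/*
 * Freeze protocol used by the ops below: writing ADL_UNCORE_IMC_CTL_FRZ
 * to the box control register freezes the counters and writing 0 thaws
 * them; ADL_UNCORE_IMC_CTL_INT resets both the control and counter
 * registers once at init time via the MC1 global control register.
 */
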
static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box = adl_uncore_imc_init_box,
	.exit_box = uncore_mmio_exit_box,
	.disable_box = adl_uncore_mmio_disable_box,
	.enable_box = adl_uncore_mmio_enable_box,
	.disable_event = intel_generic_uncore_mmio_disable_event,
	.enable_event = intel_generic_uncore_mmio_enable_event,
	.read_counter = uncore_mmio_read_counter,
};

#define ADL_UNC_CTL_CHMASK_MASK 0x00000f00
#define ADL_UNC_IMC_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
				ADL_UNC_CTL_CHMASK_MASK | \
				SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name = "format",
	.attrs = adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
	.name = "imc",
	.num_counters = 5,
	.num_boxes = 2,
	.perf_ctr_bits = 64,
	.perf_ctr = ADL_UNCORE_IMC_CTR,
	.event_ctl = ADL_UNCORE_IMC_CTRL,
	.event_mask = ADL_UNC_IMC_EVENT_MASK,
	.box_ctl = ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset = 0,
	.mmio_map_size = ADL_UNCORE_IMC_MAP_SIZE,
	.ops = &adl_uncore_mmio_ops,
	.format_group = &adl_uncore_imc_format_group,
};

enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ] = { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE] = { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box = adl_uncore_imc_freerunning_init_box,
	.exit_box = uncore_mmio_exit_box,
	.read_counter = uncore_mmio_read_counter,
	.hw_config = uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name = "imc_free_running",
	.num_counters = 3,
	.num_boxes = 2,
	.num_freerunning_types = ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size = ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning = adl_uncore_imc_freerunning,
	.ops = &adl_uncore_imc_freerunning_ops,
	.event_descs = tgl_uncore_imc_events,
	.format_group = &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL,
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */