1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
4 #include "uncore_discovery.h"
6 /* SNB-EP pci bus to socket mapping */
7 #define SNBEP_CPUNODEID 0x40
8 #define SNBEP_GIDNIDMAP 0x54
10 /* SNB-EP Box level control */
11 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
12 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
13 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
14 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
15 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
16 SNBEP_PMON_BOX_CTL_RST_CTRS | \
17 SNBEP_PMON_BOX_CTL_FRZ_EN)
18 /* SNB-EP event control */
19 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
20 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
21 #define SNBEP_PMON_CTL_RST (1 << 17)
22 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
23 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
24 #define SNBEP_PMON_CTL_EN (1 << 22)
25 #define SNBEP_PMON_CTL_INVERT (1 << 23)
26 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
27 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
28 SNBEP_PMON_CTL_UMASK_MASK | \
29 SNBEP_PMON_CTL_EDGE_DET | \
30 SNBEP_PMON_CTL_INVERT | \
31 SNBEP_PMON_CTL_TRESH_MASK)
33 /* SNB-EP Ubox event control */
34 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
35 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
36 (SNBEP_PMON_CTL_EV_SEL_MASK | \
37 SNBEP_PMON_CTL_UMASK_MASK | \
38 SNBEP_PMON_CTL_EDGE_DET | \
39 SNBEP_PMON_CTL_INVERT | \
40 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
42 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
43 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
44 SNBEP_CBO_PMON_CTL_TID_EN)
46 /* SNB-EP PCU event control */
47 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
48 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
50 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
51 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
52 (SNBEP_PMON_CTL_EV_SEL_MASK | \
53 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
54 SNBEP_PMON_CTL_EDGE_DET | \
55 SNBEP_PMON_CTL_INVERT | \
56 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
57 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
58 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
60 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
61 (SNBEP_PMON_RAW_EVENT_MASK | \
62 SNBEP_PMON_CTL_EV_SEL_EXT)
64 /* SNB-EP pci control register */
65 #define SNBEP_PCI_PMON_BOX_CTL 0xf4
66 #define SNBEP_PCI_PMON_CTL0 0xd8
67 /* SNB-EP pci counter register */
68 #define SNBEP_PCI_PMON_CTR0 0xa0
70 /* SNB-EP home agent register */
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
72 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
73 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
74 /* SNB-EP memory controller register */
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
76 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
77 /* SNB-EP QPI register */
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
81 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
83 /* SNB-EP Ubox register */
84 #define SNBEP_U_MSR_PMON_CTR0 0xc16
85 #define SNBEP_U_MSR_PMON_CTL0 0xc10
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
88 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
90 /* SNB-EP Cbo register */
91 #define SNBEP_C0_MSR_PMON_CTR0 0xd16
92 #define SNBEP_C0_MSR_PMON_CTL0 0xd10
93 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
94 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
95 #define SNBEP_CBO_MSR_OFFSET 0x20
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
100 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
/*
 * Build a struct extra_reg table entry for a Cbox event that needs the
 * C-box filter MSR:
 *   @e = event/umask code to match against the event config,
 *   @m = config bits that participate in the match,
 *   @i = bitmask of filter sub-fields the event uses (consumed by the
 *        Cbox get/put_constraint handlers — confirm against them).
 *
 * Fix: the initializer was truncated — it lacked the .event and .idx
 * fields and the closing brace, leaving an unbalanced '{' in the macro.
 */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
109 /* SNB-EP PCU register */
110 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36
111 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30
112 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
114 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
115 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
116 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
118 /* IVBEP event control */
119 #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
120 SNBEP_PMON_BOX_CTL_RST_CTRS)
121 #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
122 SNBEP_PMON_CTL_UMASK_MASK | \
123 SNBEP_PMON_CTL_EDGE_DET | \
124 SNBEP_PMON_CTL_TRESH_MASK)
126 #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
127 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
128 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
130 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
131 (SNBEP_PMON_CTL_EV_SEL_MASK | \
132 SNBEP_PMON_CTL_UMASK_MASK | \
133 SNBEP_PMON_CTL_EDGE_DET | \
134 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
136 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
137 SNBEP_CBO_PMON_CTL_TID_EN)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
146 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
148 /* IVBEP home agent */
149 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
150 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
151 (IVBEP_PMON_RAW_EVENT_MASK | \
152 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
154 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
155 (SNBEP_PMON_CTL_EV_SEL_MASK | \
156 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
157 SNBEP_PMON_CTL_EDGE_DET | \
158 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
159 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
160 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
162 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
163 (IVBEP_PMON_RAW_EVENT_MASK | \
164 SNBEP_PMON_CTL_EV_SEL_EXT)
166 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
167 ((1ULL << (n)) - 1)))
169 /* Haswell-EP Ubox */
170 #define HSWEP_U_MSR_PMON_CTR0 0x709
171 #define HSWEP_U_MSR_PMON_CTL0 0x705
172 #define HSWEP_U_MSR_PMON_FILTER 0x707
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
175 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
179 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
180 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
181 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
184 #define HSWEP_C0_MSR_PMON_CTR0 0xe08
185 #define HSWEP_C0_MSR_PMON_CTL0 0xe01
186 #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
187 #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
188 #define HSWEP_CBO_MSR_OFFSET 0x10
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
198 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
201 /* Haswell-EP Sbox */
202 #define HSWEP_S0_MSR_PMON_CTR0 0x726
203 #define HSWEP_S0_MSR_PMON_CTL0 0x721
204 #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
205 #define HSWEP_SBOX_MSR_OFFSET 0xa
206 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
207 SNBEP_CBO_PMON_CTL_TID_EN)
210 #define HSWEP_PCU_MSR_PMON_CTR0 0x717
211 #define HSWEP_PCU_MSR_PMON_CTL0 0x711
212 #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
213 #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
216 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
217 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
218 SNBEP_CBO_PMON_CTL_TID_EN)
220 #define KNL_CHA_MSR_OFFSET 0xc
221 #define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
222 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
223 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
224 KNL_CHA_MSR_PMON_CTL_QOR)
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
230 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)
232 /* KNL EDC/MC UCLK */
233 #define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
234 #define KNL_UCLK_MSR_PMON_CTL0 0x420
235 #define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
237 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
238 #define KNL_PMON_FIXED_CTL_EN 0x1
241 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
242 #define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
243 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
245 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44
248 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
249 #define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
250 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
252 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44
255 #define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
256 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
257 KNL_CHA_MSR_PMON_CTL_QOR)
259 #define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
260 #define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
261 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
262 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
263 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
264 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
265 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
266 SNBEP_PMON_CTL_EDGE_DET | \
267 SNBEP_CBO_PMON_CTL_TID_EN | \
268 SNBEP_PMON_CTL_INVERT | \
269 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
270 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
271 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
273 /* SKX pci bus to socket mapping */
274 #define SKX_CPUNODEID 0xc0
275 #define SKX_GIDNIDMAP 0xd4
/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  | Default | Description
 * | [63]  |   00h   | VALID - When set, indicates the CPU bus
 *                     numbers have been initialized. (RO)
 * |[62:48]|   ---   | Reserved
 * |[47:40]|   00h   | BUS_NUM_5 - Return the bus number BIOS assigned
 *                     CPUBUSNO(5). (RO)
 * |[39:32]|   00h   | BUS_NUM_4 - Return the bus number BIOS assigned
 *                     CPUBUSNO(4). (RO)
 * |[31:24]|   00h   | BUS_NUM_3 - Return the bus number BIOS assigned
 *                     CPUBUSNO(3). (RO)
 * |[23:16]|   00h   | BUS_NUM_2 - Return the bus number BIOS assigned
 *                     CPUBUSNO(2). (RO)
 * |[15:8] |   00h   | BUS_NUM_1 - Return the bus number BIOS assigned
 *                     CPUBUSNO(1). (RO)
 * | [7:0] |   00h   | BUS_NUM_0 - Return the bus number BIOS assigned
 *                     CPUBUSNO(0). (RO)
 */
297 #define SKX_MSR_CPU_BUS_NUMBER 0x300
298 #define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
299 #define BUS_NUM_STRIDE 8
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
314 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
317 #define SKX_IIO0_MSR_PMON_CTL0 0xa48
318 #define SKX_IIO0_MSR_PMON_CTR0 0xa41
319 #define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
320 #define SKX_IIO_MSR_OFFSET 0x20
322 #define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
323 #define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
324 #define SKX_PMON_CTL_CH_MASK (0xff << 4)
325 #define SKX_PMON_CTL_FC_MASK (0x7 << 12)
326 #define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
327 SNBEP_PMON_CTL_UMASK_MASK | \
328 SNBEP_PMON_CTL_EDGE_DET | \
329 SNBEP_PMON_CTL_INVERT | \
330 SKX_PMON_CTL_TRESH_MASK)
331 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
332 SKX_PMON_CTL_CH_MASK | \
333 SKX_PMON_CTL_FC_MASK)
336 #define SKX_IRP0_MSR_PMON_CTL0 0xa5b
337 #define SKX_IRP0_MSR_PMON_CTR0 0xa59
338 #define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
339 #define SKX_IRP_MSR_OFFSET 0x20
342 #define SKX_UPI_PCI_PMON_CTL0 0x350
343 #define SKX_UPI_PCI_PMON_CTR0 0x318
344 #define SKX_UPI_PCI_PMON_BOX_CTL 0x378
345 #define SKX_UPI_CTL_UMASK_EXT 0xffefff
348 #define SKX_M2M_PCI_PMON_CTL0 0x228
349 #define SKX_M2M_PCI_PMON_CTR0 0x200
350 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258
352 /* Memory Map registers device ID */
353 #define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
354 #define SNR_ICX_SAD_CONTROL_CFG 0x3f4
/* Getting I/O stack id in SAD_CONTROL_CFG notation */
357 #define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
360 #define SNR_U_MSR_PMON_CTR0 0x1f98
361 #define SNR_U_MSR_PMON_CTL0 0x1f91
362 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
363 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
366 #define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
367 #define SNR_CHA_MSR_PMON_CTL0 0x1c01
368 #define SNR_CHA_MSR_PMON_CTR0 0x1c08
369 #define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
370 #define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
374 #define SNR_IIO_MSR_PMON_CTL0 0x1e08
375 #define SNR_IIO_MSR_PMON_CTR0 0x1e01
376 #define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
377 #define SNR_IIO_MSR_OFFSET 0x10
378 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
381 #define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
382 #define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
383 #define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
384 #define SNR_IRP_MSR_OFFSET 0x10
387 #define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
388 #define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
389 #define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
390 #define SNR_M2PCIE_MSR_OFFSET 0x10
393 #define SNR_PCU_MSR_PMON_CTL0 0x1ef1
394 #define SNR_PCU_MSR_PMON_CTR0 0x1ef8
395 #define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
396 #define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
399 #define SNR_M2M_PCI_PMON_CTL0 0x468
400 #define SNR_M2M_PCI_PMON_CTR0 0x440
401 #define SNR_M2M_PCI_PMON_BOX_CTL 0x438
402 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
405 #define SNR_PCIE3_PCI_PMON_CTL0 0x508
406 #define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
407 #define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
410 #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
411 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
412 #define SNR_IMC_MMIO_PMON_CTL0 0x40
413 #define SNR_IMC_MMIO_PMON_CTR0 0x8
414 #define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
415 #define SNR_IMC_MMIO_OFFSET 0x4000
416 #define SNR_IMC_MMIO_SIZE 0x4000
417 #define SNR_IMC_MMIO_BASE_OFFSET 0xd0
418 #define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
419 #define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
420 #define SNR_IMC_MMIO_MEM0_MASK 0x7FF
423 #define ICX_C34_MSR_PMON_CTR0 0xb68
424 #define ICX_C34_MSR_PMON_CTL0 0xb61
425 #define ICX_C34_MSR_PMON_BOX_CTL 0xb60
426 #define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65
429 #define ICX_IIO_MSR_PMON_CTL0 0xa58
430 #define ICX_IIO_MSR_PMON_CTR0 0xa51
431 #define ICX_IIO_MSR_PMON_BOX_CTL 0xa50
434 #define ICX_IRP0_MSR_PMON_CTL0 0xa4d
435 #define ICX_IRP0_MSR_PMON_CTR0 0xa4b
436 #define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a
439 #define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
440 #define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
441 #define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40
444 #define ICX_UPI_PCI_PMON_CTL0 0x350
445 #define ICX_UPI_PCI_PMON_CTR0 0x320
446 #define ICX_UPI_PCI_PMON_BOX_CTL 0x318
447 #define ICX_UPI_CTL_UMASK_EXT 0xffffff
450 #define ICX_M3UPI_PCI_PMON_CTL0 0xd8
451 #define ICX_M3UPI_PCI_PMON_CTR0 0xa8
452 #define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0
455 #define ICX_NUMBER_IMC_CHN 2
456 #define ICX_IMC_MEM_STRIDE 0x4
459 #define SPR_RAW_EVENT_MASK_EXT 0xffffff
462 #define SPR_CHA_PMON_CTL_TID_EN (1 << 16)
463 #define SPR_CHA_PMON_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
464 SPR_CHA_PMON_CTL_TID_EN)
465 #define SPR_CHA_PMON_BOX_FILTER_TID 0x3ff
467 #define SPR_C0_MSR_PMON_BOX_FILTER0 0x200e
/*
 * sysfs "format" attributes: each entry maps a user-visible event-format
 * field (event, umask, thresh, filter_*, match_*, mask_*, ...) onto a bit
 * range of the perf_event attr config/config1/config2 words.  The numbered
 * variants (thresh9/8/6/5, filter_tid2..5, umask_ext2..4, ...) cover the
 * differing field widths and positions across uncore generations.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
549 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
551 struct pci_dev *pdev = box->pci_dev;
552 int box_ctl = uncore_pci_box_ctl(box);
555 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
556 config |= SNBEP_PMON_BOX_CTL_FRZ;
557 pci_write_config_dword(pdev, box_ctl, config);
561 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
563 struct pci_dev *pdev = box->pci_dev;
564 int box_ctl = uncore_pci_box_ctl(box);
567 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
568 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
569 pci_write_config_dword(pdev, box_ctl, config);
573 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
575 struct pci_dev *pdev = box->pci_dev;
576 struct hw_perf_event *hwc = &event->hw;
578 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
581 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
583 struct pci_dev *pdev = box->pci_dev;
584 struct hw_perf_event *hwc = &event->hw;
586 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
589 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
591 struct pci_dev *pdev = box->pci_dev;
592 struct hw_perf_event *hwc = &event->hw;
595 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
596 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
601 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
603 struct pci_dev *pdev = box->pci_dev;
604 int box_ctl = uncore_pci_box_ctl(box);
606 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
609 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
614 msr = uncore_msr_box_ctl(box);
617 config |= SNBEP_PMON_BOX_CTL_FRZ;
622 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
627 msr = uncore_msr_box_ctl(box);
630 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
635 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
637 struct hw_perf_event *hwc = &event->hw;
638 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
640 if (reg1->idx != EXTRA_REG_NONE)
641 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
643 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
646 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
647 struct perf_event *event)
649 struct hw_perf_event *hwc = &event->hw;
651 wrmsrl(hwc->config_base, hwc->config);
654 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
656 unsigned msr = uncore_msr_box_ctl(box);
659 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
/* Baseline format fields shared by most SNB-EP PMON boxes. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
/* Ubox format: like the baseline, but the threshold field is only 5 bits. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
/* Cbox format: adds tid_en plus the filter-register fields (config1). */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
/* PCU format: occupancy-select/invert/edge fields plus four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
/*
 * QPI format: extended event select (config bit 21) plus packet
 * match (config1) and mask (config2) fields.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
/*
 * Predefined IMC events.  The .scale entries are 64 / 2^20, i.e. each CAS
 * count is converted to MiB — presumably one 64-byte line per CAS; confirm
 * against the uncore PMU reference manual.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
/*
 * Predefined QPI events.  Codes 0x102/0x103 exceed 8 bits: bit 8 lands in
 * the extended event-select bit (config bit 21, see format_attr_event_ext).
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
/*
 * sysfs attribute_group wrappers binding each format table to its box type.
 * (The groups' .name initializers are not visible in this chunk.)
 */
static const struct attribute_group snbep_uncore_format_group = {
	.attrs = snbep_uncore_formats_attr,
static const struct attribute_group snbep_uncore_ubox_format_group = {
	.attrs = snbep_uncore_ubox_formats_attr,
static const struct attribute_group snbep_uncore_cbox_format_group = {
	.attrs = snbep_uncore_cbox_formats_attr,
static const struct attribute_group snbep_uncore_pcu_format_group = {
	.attrs = snbep_uncore_pcu_formats_attr,
static const struct attribute_group snbep_uncore_qpi_format_group = {
	.attrs = snbep_uncore_qpi_formats_attr,
/*
 * Common MSR-box ops initializers.  The __ variant deliberately omits
 * .init_box so later uncore generations can plug in their own init.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
	.disable_box = snbep_uncore_msr_disable_box, \
	.enable_box = snbep_uncore_msr_enable_box, \
	.disable_event = snbep_uncore_msr_disable_event, \
	.enable_event = snbep_uncore_msr_enable_event, \
	.read_counter = uncore_msr_read_counter

/* Full variant: common ops plus the SNB-EP init_box. */
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
	.init_box = snbep_uncore_msr_init_box \
/* Default ops vector for SNB-EP MSR-based PMON boxes. */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
/*
 * Common PCI-box ops initializer.  .enable_event is intentionally left out
 * so individual ops vectors can supply their own enable path.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
	.init_box = snbep_uncore_pci_init_box, \
	.disable_box = snbep_uncore_pci_disable_box, \
	.enable_box = snbep_uncore_pci_enable_box, \
	.disable_event = snbep_uncore_pci_disable_event, \
	.read_counter = snbep_uncore_pci_read_counter
802 static struct intel_uncore_ops snbep_uncore_pci_ops = {
803 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
804 .enable_event = snbep_uncore_pci_enable_event, \
/*
 * Cbox event constraints: (event code, bitmask of counters the event may
 * run on) — e.g. 0x1 pins to counter 0, 0xc restricts to counters 2-3.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
/* R2PCIe event constraints: (event code, usable-counter bitmask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
/* R3QPI event constraints: (event code, usable-counter bitmask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
/*
 * SNB-EP Ubox PMON description: generic counters plus a 48-bit fixed
 * uncore-clock counter, driven through the common MSR ops.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.fixed_ctr_bits = 48,
	.perf_ctr = SNBEP_U_MSR_PMON_CTR0,
	.event_ctl = SNBEP_U_MSR_PMON_CTL0,
	.event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops = &snbep_uncore_msr_ops,
	.format_group = &snbep_uncore_ubox_format_group,
/*
 * Cbox events that require programming the shared filter MSR.  For each
 * entry: (event code to match, config bits compared, filter sub-field
 * mask the event needs — consumed by the Cbox constraint handlers).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
927 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
929 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
930 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
933 if (uncore_box_is_fake(box))
936 for (i = 0; i < 5; i++) {
937 if (reg1->alloc & (0x1 << i))
938 atomic_sub(1 << (i * 6), &er->ref);
943 static struct event_constraint *
944 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
945 u64 (*cbox_filter_mask)(int fields))
947 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
948 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
953 if (reg1->idx == EXTRA_REG_NONE)
956 raw_spin_lock_irqsave(&er->lock, flags);
957 for (i = 0; i < 5; i++) {
958 if (!(reg1->idx & (0x1 << i)))
960 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
963 mask = cbox_filter_mask(0x1 << i);
964 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
965 !((reg1->config ^ er->config) & mask)) {
966 atomic_add(1 << (i * 6), &er->ref);
968 er->config |= reg1->config & mask;
974 raw_spin_unlock_irqrestore(&er->lock, flags);
978 if (!uncore_box_is_fake(box))
979 reg1->alloc |= alloc;
983 for (; i >= 0; i--) {
984 if (alloc & (0x1 << i))
985 atomic_sub(1 << (i * 6), &er->ref);
987 return &uncore_constraint_empty;
990 static u64 snbep_cbox_filter_mask(int fields)
995 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
997 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
999 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1001 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1006 static struct event_constraint *
1007 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1009 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
1012 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1014 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1015 struct extra_reg *er;
1018 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
1019 if (er->event != (event->hw.config & er->config_mask))
1025 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1026 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1027 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
/*
 * Cbox PMU ops: common MSR accessors plus the filter-register hw_config and
 * shared-register constraint management defined above.
 * NOTE(review): extraction dropped the closing "};" of this initializer.
 */
1033 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
1034 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1035 .hw_config = snbep_cbox_hw_config,
1036 .get_constraint = snbep_cbox_get_constraint,
1037 .put_constraint = snbep_cbox_put_constraint,
/*
 * SNB-EP Cbox (LLC coherence engine) PMU type: one box per core, MSR-based,
 * with a single shared filter register per box.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};" — verify against the original file.
 */
1040 static struct intel_uncore_type snbep_uncore_cbox = {
1044 .perf_ctr_bits = 44,
1045 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1046 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1047 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1048 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1049 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1050 .num_shared_regs = 1,
1051 .constraints = snbep_uncore_cbox_constraints,
1052 .ops = &snbep_uncore_cbox_ops,
1053 .format_group = &snbep_uncore_cbox_format_group,
1056 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1058 struct hw_perf_event *hwc = &event->hw;
1059 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1060 u64 config = reg1->config;
1062 if (new_idx > reg1->idx)
1063 config <<= 8 * (new_idx - reg1->idx);
1065 config >>= 8 * (reg1->idx - new_idx);
1068 hwc->config += new_idx - reg1->idx;
1069 reg1->config = config;
1070 reg1->idx = new_idx;
1075 static struct event_constraint *
1076 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1078 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1079 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1080 unsigned long flags;
1081 int idx = reg1->idx;
1082 u64 mask, config1 = reg1->config;
1085 if (reg1->idx == EXTRA_REG_NONE ||
1086 (!uncore_box_is_fake(box) && reg1->alloc))
1089 mask = 0xffULL << (idx * 8);
1090 raw_spin_lock_irqsave(&er->lock, flags);
1091 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
1092 !((config1 ^ er->config) & mask)) {
1093 atomic_add(1 << (idx * 8), &er->ref);
1094 er->config &= ~mask;
1095 er->config |= config1 & mask;
1098 raw_spin_unlock_irqrestore(&er->lock, flags);
1101 idx = (idx + 1) % 4;
1102 if (idx != reg1->idx) {
1103 config1 = snbep_pcu_alter_er(event, idx, false);
1106 return &uncore_constraint_empty;
1109 if (!uncore_box_is_fake(box)) {
1110 if (idx != reg1->idx)
1111 snbep_pcu_alter_er(event, idx, true);
1117 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1119 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1120 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1122 if (uncore_box_is_fake(box) || !reg1->alloc)
1125 atomic_sub(1 << (reg1->idx * 8), &er->ref);
1129 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1131 struct hw_perf_event *hwc = &event->hw;
1132 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1133 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1135 if (ev_sel >= 0xb && ev_sel <= 0xe) {
1136 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1137 reg1->idx = ev_sel - 0xb;
1138 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
/*
 * PCU PMU ops: common MSR accessors plus band-filter configuration and
 * shared-register slot management.
 * NOTE(review): extraction dropped the closing "};" of this initializer.
 */
1143 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
1144 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1145 .hw_config = snbep_pcu_hw_config,
1146 .get_constraint = snbep_pcu_get_constraint,
1147 .put_constraint = snbep_pcu_put_constraint,
/*
 * SNB-EP PCU (power control unit) PMU type: MSR-based, with the shared
 * band-filter register.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};" — verify against the original file.
 */
1150 static struct intel_uncore_type snbep_uncore_pcu = {
1154 .perf_ctr_bits = 48,
1155 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1156 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1157 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1158 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1159 .num_shared_regs = 1,
1160 .ops = &snbep_uncore_pcu_ops,
1161 .format_group = &snbep_uncore_pcu_format_group,
/*
 * NULL-terminated list of SNB-EP MSR-based uncore types.
 * NOTE(review): the array entries (presumably &snbep_uncore_ubox,
 * &snbep_uncore_cbox, &snbep_uncore_pcu, NULL) were dropped by the
 * extraction — restore from the original file.
 */
1164 static struct intel_uncore_type *snbep_msr_uncores[] = {
1171 void snbep_uncore_cpu_init(void)
1173 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1174 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1175 uncore_msr_uncores = snbep_msr_uncores;
/*
 * Indices into uncore_extra_pci_dev[].dev[] for the per-port QPI filter
 * PCI devices (BDX adds a third port).
 * NOTE(review): the opening "enum { ... " line of this enum was dropped by
 * the extraction — restore from the original file.
 */
1179 SNBEP_PCI_QPI_PORT0_FILTER,
1180 SNBEP_PCI_QPI_PORT1_FILTER,
1181 BDX_PCI_QPI_PORT2_FILTER,
1184 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1186 struct hw_perf_event *hwc = &event->hw;
1187 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1188 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1190 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1192 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1193 reg1->config = event->attr.config1;
1194 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1195 reg2->config = event->attr.config2;
1200 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1202 struct pci_dev *pdev = box->pci_dev;
1203 struct hw_perf_event *hwc = &event->hw;
1204 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1205 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1207 if (reg1->idx != EXTRA_REG_NONE) {
1208 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1209 int die = box->dieid;
1210 struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1213 pci_write_config_dword(filter_pdev, reg1->reg,
1215 pci_write_config_dword(filter_pdev, reg1->reg + 4,
1216 (u32)(reg1->config >> 32));
1217 pci_write_config_dword(filter_pdev, reg2->reg,
1219 pci_write_config_dword(filter_pdev, reg2->reg + 4,
1220 (u32)(reg2->config >> 32));
1224 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
/*
 * QPI PMU ops: common PCI accessors plus the match/mask filter handling.
 * NOTE(review): extraction dropped the closing "};" of this initializer.
 */
1227 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1228 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1229 .enable_event = snbep_qpi_enable_event,
1230 .hw_config = snbep_qpi_hw_config,
1231 .get_constraint = uncore_get_constraint,
1232 .put_constraint = uncore_put_constraint,
/*
 * Common initializer fields shared by all SNB-EP PCI-based uncore types
 * (HA, IMC, R2PCIe, R3QPI): standard counter/control offsets, plain PCI
 * ops and the default format group.
 */
1235 #define SNBEP_UNCORE_PCI_COMMON_INIT() \
1236 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1237 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1238 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
1239 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1240 .ops = &snbep_uncore_pci_ops, \
1241 .format_group = &snbep_uncore_format_group
/*
 * SNB-EP Home Agent PMU type (PCI-based).
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1243 static struct intel_uncore_type snbep_uncore_ha = {
1247 .perf_ctr_bits = 48,
1248 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * SNB-EP IMC (memory controller channel) PMU type: PCI-based with a fixed
 * DCLK counter and predefined cas_count events.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1251 static struct intel_uncore_type snbep_uncore_imc = {
1255 .perf_ctr_bits = 48,
1256 .fixed_ctr_bits = 48,
1257 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1258 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1259 .event_descs = snbep_uncore_imc_events,
1260 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * SNB-EP QPI link PMU type: PCI-based with extended event select and
 * match/mask filtering through a shared register.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1263 static struct intel_uncore_type snbep_uncore_qpi = {
1267 .perf_ctr_bits = 48,
1268 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1269 .event_ctl = SNBEP_PCI_PMON_CTL0,
1270 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1271 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1272 .num_shared_regs = 1,
1273 .ops = &snbep_uncore_qpi_ops,
1274 .event_descs = snbep_uncore_qpi_events,
1275 .format_group = &snbep_uncore_qpi_format_group,
/*
 * SNB-EP R2PCIe (ring-to-PCIe) PMU type with its counter constraints.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1279 static struct intel_uncore_type snbep_uncore_r2pcie = {
1283 .perf_ctr_bits = 44,
1284 .constraints = snbep_uncore_r2pcie_constraints,
1285 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * SNB-EP R3QPI (ring-to-QPI) PMU type with its counter constraints.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1288 static struct intel_uncore_type snbep_uncore_r3qpi = {
1292 .perf_ctr_bits = 44,
1293 .constraints = snbep_uncore_r3qpi_constraints,
1294 SNBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * Indices of the SNB-EP PCI-based uncore types in snbep_pci_uncores[].
 * NOTE(review): the opening "enum {" line was dropped by the extraction.
 */
1298 SNBEP_PCI_UNCORE_HA,
1299 SNBEP_PCI_UNCORE_IMC,
1300 SNBEP_PCI_UNCORE_QPI,
1301 SNBEP_PCI_UNCORE_R2PCIE,
1302 SNBEP_PCI_UNCORE_R3QPI,
/*
 * Table of SNB-EP PCI-based uncore types, indexed by the enum above.
 * NOTE(review): extraction dropped the terminating NULL entry and "};".
 */
1305 static struct intel_uncore_type *snbep_pci_uncores[] = {
1306 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
1307 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
1308 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
1309 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
1310 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
/*
 * PCI match table mapping SNB-EP uncore device IDs to (type, box index)
 * pairs via UNCORE_PCI_DEV_DATA.  The 0x3c86/0x3c96 entries are the QPI
 * port filter devices, registered as extra devices rather than PMUs.
 * NOTE(review): several "{ ... */ ... }" delimiter lines were dropped by
 * the extraction — verify the brace structure against the original file.
 */
1314 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1316 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1317 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1319 { /* MC Channel 0 */
1320 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1321 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1323 { /* MC Channel 1 */
1324 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1325 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1327 { /* MC Channel 2 */
1328 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1329 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1331 { /* MC Channel 3 */
1332 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1333 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1336 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1337 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1340 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1341 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1344 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1345 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1347 { /* R3QPI Link 0 */
1348 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1349 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1351 { /* R3QPI Link 1 */
1352 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1353 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1355 { /* QPI Port 0 filter */
1356 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1357 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1358 SNBEP_PCI_QPI_PORT0_FILTER),
1360 { /* QPI Port 0 filter */
1361 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1362 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1363 SNBEP_PCI_QPI_PORT1_FILTER),
1365 { /* end: all zeroes */ }
/*
 * PCI driver stub handed to the uncore core; it does the probing itself,
 * so only the name and ID table are filled in.
 * NOTE(review): extraction dropped the closing "};".
 */
1368 static struct pci_driver snbep_uncore_pci_driver = {
1369 .name = "snbep_uncore",
1370 .id_table = snbep_uncore_pci_ids,
1373 #define NODE_ID_MASK 0x7
1376 * build pci bus to socket mapping
/*
 * Build the PCI-bus -> logical-die mapping used by the PCI uncore PMUs.
 * Finds every UBOX device with the given @devid, and for small systems
 * (<= 8 nodes) decodes the node ID and GID->NID mapping registers
 * (@nodeid_loc / @idmap_loc, 3 bits per node); for larger systems it falls
 * back to NUMA information from the BIOS, scanning CPUs on the bus to find
 * the logical die.  Buses without a UBOX inherit the mapping of the nearest
 * mapped bus.  Returns 0 or a pcibios errno.
 * NOTE(review): the extraction dropped many lines of this function
 * (declarations, error gotos, closing braces) — do not modify it without
 * consulting the original file.
 */
1378 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1380 struct pci_dev *ubox_dev = NULL;
1381 int i, bus, nodeid, segment, die_id;
1382 struct pci2phy_map *map;
1387 /* find the UBOX device */
1388 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1391 bus = ubox_dev->bus->number;
1393 * The nodeid and idmap registers only contain enough
1394 * information to handle 8 nodes. On systems with more
1395 * than 8 nodes, we need to rely on NUMA information,
1396 * filled in from BIOS supplied information, to determine
1399 if (nr_node_ids <= 8) {
1400 /* get the Node ID of the local register */
1401 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1404 nodeid = config & NODE_ID_MASK;
1405 /* get the Node ID mapping */
1406 err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1410 segment = pci_domain_nr(ubox_dev->bus);
1411 raw_spin_lock(&pci2phy_map_lock);
1412 map = __find_pci2phy_map(segment);
1414 raw_spin_unlock(&pci2phy_map_lock);
1420 * every three bits in the Node ID mapping register maps
1421 * to a particular node.
1423 for (i = 0; i < 8; i++) {
1424 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1425 if (topology_max_die_per_package() > 1)
1428 die_id = topology_phys_to_logical_pkg(i);
1431 map->pbus_to_dieid[bus] = die_id;
1435 raw_spin_unlock(&pci2phy_map_lock);
/* NUMA fallback path for systems with more than 8 nodes */
1437 int node = pcibus_to_node(ubox_dev->bus);
1440 segment = pci_domain_nr(ubox_dev->bus);
1441 raw_spin_lock(&pci2phy_map_lock);
1442 map = __find_pci2phy_map(segment);
1444 raw_spin_unlock(&pci2phy_map_lock);
1450 for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1451 struct cpuinfo_x86 *c = &cpu_data(cpu);
1453 if (c->initialized && cpu_to_node(cpu) == node) {
1454 map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1458 raw_spin_unlock(&pci2phy_map_lock);
1460 if (WARN_ON_ONCE(die_id == -1)) {
1469 * For PCI bus with no UBOX device, find the next bus
1470 * that has UBOX device and use its mapping.
1472 raw_spin_lock(&pci2phy_map_lock);
1473 list_for_each_entry(map, &pci2phy_map_head, list) {
/* propagate known die ids downward, then upward, across unmapped buses */
1476 for (bus = 255; bus >= 0; bus--) {
1477 if (map->pbus_to_dieid[bus] != -1)
1478 i = map->pbus_to_dieid[bus];
1480 map->pbus_to_dieid[bus] = i;
1483 for (bus = 0; bus <= 255; bus++) {
1484 if (map->pbus_to_dieid[bus] != -1)
1485 i = map->pbus_to_dieid[bus];
1487 map->pbus_to_dieid[bus] = i;
1491 raw_spin_unlock(&pci2phy_map_lock);
1494 pci_dev_put(ubox_dev);
1496 return err ? pcibios_err_to_errno(err) : 0;
1499 int snbep_uncore_pci_init(void)
1501 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1504 uncore_pci_uncores = snbep_pci_uncores;
1505 uncore_pci_driver = &snbep_uncore_pci_driver;
1508 /* end of Sandy Bridge-EP uncore support */
1510 /* IvyTown uncore support */
1511 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1513 unsigned msr = uncore_msr_box_ctl(box);
1515 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1518 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1520 struct pci_dev *pdev = box->pci_dev;
1522 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
/*
 * Common MSR ops for IvyTown boxes: IVT-specific init_box, everything else
 * reused from the SNB-EP implementations.
 */
1525 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1526 .init_box = ivbep_uncore_msr_init_box, \
1527 .disable_box = snbep_uncore_msr_disable_box, \
1528 .enable_box = snbep_uncore_msr_enable_box, \
1529 .disable_event = snbep_uncore_msr_disable_event, \
1530 .enable_event = snbep_uncore_msr_enable_event, \
1531 .read_counter = uncore_msr_read_counter
/*
 * Default IvyTown MSR box ops (no extra hooks).
 * NOTE(review): extraction dropped the closing "};".
 */
1533 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1534 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
/*
 * Default IvyTown PCI box ops: IVT init_box, SNB-EP accessors otherwise.
 * NOTE(review): extraction dropped the closing "};".
 */
1537 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1538 .init_box = ivbep_uncore_pci_init_box,
1539 .disable_box = snbep_uncore_pci_disable_box,
1540 .enable_box = snbep_uncore_pci_enable_box,
1541 .disable_event = snbep_uncore_pci_disable_event,
1542 .enable_event = snbep_uncore_pci_enable_event,
1543 .read_counter = snbep_uncore_pci_read_counter,
/*
 * Common initializer fields for IvyTown PCI-based uncore types: same
 * register offsets as SNB-EP, but the IVT event mask, ops and formats.
 */
1546 #define IVBEP_UNCORE_PCI_COMMON_INIT() \
1547 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1548 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1549 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
1550 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1551 .ops = &ivbep_uncore_pci_ops, \
1552 .format_group = &ivbep_uncore_format_group
/*
 * Default IvyTown sysfs format attributes (event/umask/edge/inv/thresh8).
 * NOTE(review): extraction dropped the terminating NULL and "};".
 */
1554 static struct attribute *ivbep_uncore_formats_attr[] = {
1555 &format_attr_event.attr,
1556 &format_attr_umask.attr,
1557 &format_attr_edge.attr,
1558 &format_attr_inv.attr,
1559 &format_attr_thresh8.attr,
/*
 * IvyTown Ubox format attributes (5-bit threshold field).
 * NOTE(review): extraction dropped the terminating NULL and "};".
 */
1563 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1564 &format_attr_event.attr,
1565 &format_attr_umask.attr,
1566 &format_attr_edge.attr,
1567 &format_attr_inv.attr,
1568 &format_attr_thresh5.attr,
/*
 * IvyTown Cbox format attributes, including the extended filter fields
 * (tid/link/state2/nid2/opc2/nc/c6/isoc).
 * NOTE(review): extraction dropped the terminating NULL and "};".
 */
1572 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1573 &format_attr_event.attr,
1574 &format_attr_umask.attr,
1575 &format_attr_edge.attr,
1576 &format_attr_tid_en.attr,
1577 &format_attr_thresh8.attr,
1578 &format_attr_filter_tid.attr,
1579 &format_attr_filter_link.attr,
1580 &format_attr_filter_state2.attr,
1581 &format_attr_filter_nid2.attr,
1582 &format_attr_filter_opc2.attr,
1583 &format_attr_filter_nc.attr,
1584 &format_attr_filter_c6.attr,
1585 &format_attr_filter_isoc.attr,
/*
 * IvyTown PCU format attributes: occupancy select/invert/edge plus the four
 * frequency band filters.
 * NOTE(review): extraction dropped the terminating NULL and "};".
 */
1589 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1590 &format_attr_event.attr,
1591 &format_attr_occ_sel.attr,
1592 &format_attr_edge.attr,
1593 &format_attr_thresh5.attr,
1594 &format_attr_occ_invert.attr,
1595 &format_attr_occ_edge.attr,
1596 &format_attr_filter_band0.attr,
1597 &format_attr_filter_band1.attr,
1598 &format_attr_filter_band2.attr,
1599 &format_attr_filter_band3.attr,
/*
 * IvyTown QPI format attributes: extended event select plus the packet
 * match/mask sub-fields.
 * NOTE(review): extraction dropped the terminating NULL and "};".
 */
1603 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1604 &format_attr_event_ext.attr,
1605 &format_attr_umask.attr,
1606 &format_attr_edge.attr,
1607 &format_attr_thresh8.attr,
1608 &format_attr_match_rds.attr,
1609 &format_attr_match_rnid30.attr,
1610 &format_attr_match_rnid4.attr,
1611 &format_attr_match_dnid.attr,
1612 &format_attr_match_mc.attr,
1613 &format_attr_match_opc.attr,
1614 &format_attr_match_vnw.attr,
1615 &format_attr_match0.attr,
1616 &format_attr_match1.attr,
1617 &format_attr_mask_rds.attr,
1618 &format_attr_mask_rnid30.attr,
1619 &format_attr_mask_rnid4.attr,
1620 &format_attr_mask_dnid.attr,
1621 &format_attr_mask_mc.attr,
1622 &format_attr_mask_opc.attr,
1623 &format_attr_mask_vnw.attr,
1624 &format_attr_mask0.attr,
1625 &format_attr_mask1.attr,
/* Default IvyTown sysfs "format" group. NOTE(review): .name line and "};" dropped by extraction. */
1629 static const struct attribute_group ivbep_uncore_format_group = {
1631 .attrs = ivbep_uncore_formats_attr,
/* IvyTown Ubox "format" group. NOTE(review): .name line and "};" dropped by extraction. */
1634 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1636 .attrs = ivbep_uncore_ubox_formats_attr,
/* IvyTown Cbox "format" group. NOTE(review): .name line and "};" dropped by extraction. */
1639 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1641 .attrs = ivbep_uncore_cbox_formats_attr,
/* IvyTown PCU "format" group. NOTE(review): .name line and "};" dropped by extraction. */
1644 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1646 .attrs = ivbep_uncore_pcu_formats_attr,
/* IvyTown QPI "format" group. NOTE(review): .name line and "};" dropped by extraction. */
1649 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1651 .attrs = ivbep_uncore_qpi_formats_attr,
/*
 * IvyTown Ubox PMU type: same MSR layout as SNB-EP with the IVT event mask.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1654 static struct intel_uncore_type ivbep_uncore_ubox = {
1658 .perf_ctr_bits = 44,
1659 .fixed_ctr_bits = 48,
1660 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1661 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1662 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1663 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1664 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1665 .ops = &ivbep_uncore_msr_ops,
1666 .format_group = &ivbep_uncore_ubox_format_group,
/*
 * IvyTown Cbox events needing the shared filter register; the field
 * bitmasks (third argument) use the IVT layout decoded by
 * ivbep_cbox_filter_mask() (0x10 selects the OPC/NC/C6/ISOC group).
 * NOTE(review): extraction dropped the closing "};".
 */
1669 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1670 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1671 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1672 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1673 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1674 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1675 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1676 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1677 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1678 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1679 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1680 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1681 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1682 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1683 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1684 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1685 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1686 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1687 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1688 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1689 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1690 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1691 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1692 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1693 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1694 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1695 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1696 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1697 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1698 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1699 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1700 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1701 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1702 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1703 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1704 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1705 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1706 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1710 static u64 ivbep_cbox_filter_mask(int fields)
1715 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1717 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1719 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1721 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1722 if (fields & 0x10) {
1723 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1724 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1725 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1726 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1732 static struct event_constraint *
1733 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1735 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1738 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1740 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1741 struct extra_reg *er;
1744 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1745 if (er->event != (event->hw.config & er->config_mask))
1751 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1752 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1753 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1759 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1761 struct hw_perf_event *hwc = &event->hw;
1762 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1764 if (reg1->idx != EXTRA_REG_NONE) {
1765 u64 filter = uncore_shared_reg_config(box, 0);
1766 wrmsrl(reg1->reg, filter & 0xffffffff);
1767 wrmsrl(reg1->reg + 6, filter >> 32);
1770 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
/*
 * IvyTown Cbox ops: IVT init/enable_event (split filter MSR write), shared
 * constraint logic with the IVT filter layout, SNB-EP put_constraint.
 * NOTE(review): extraction dropped the closing "};".
 */
1773 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1774 .init_box = ivbep_uncore_msr_init_box,
1775 .disable_box = snbep_uncore_msr_disable_box,
1776 .enable_box = snbep_uncore_msr_enable_box,
1777 .disable_event = snbep_uncore_msr_disable_event,
1778 .enable_event = ivbep_cbox_enable_event,
1779 .read_counter = uncore_msr_read_counter,
1780 .hw_config = ivbep_cbox_hw_config,
1781 .get_constraint = ivbep_cbox_get_constraint,
1782 .put_constraint = snbep_cbox_put_constraint,
/*
 * IvyTown Cbox PMU type: SNB-EP register layout with the IVT event mask and
 * ops; reuses the SNB-EP counter constraints.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1785 static struct intel_uncore_type ivbep_uncore_cbox = {
1789 .perf_ctr_bits = 44,
1790 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1791 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1792 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1793 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1794 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1795 .num_shared_regs = 1,
1796 .constraints = snbep_uncore_cbox_constraints,
1797 .ops = &ivbep_uncore_cbox_ops,
1798 .format_group = &ivbep_uncore_cbox_format_group,
/*
 * IvyTown PCU ops: IVT common MSR ops plus the SNB-EP band filter handling.
 * NOTE(review): extraction dropped the closing "};".
 */
1801 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1802 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1803 .hw_config = snbep_pcu_hw_config,
1804 .get_constraint = snbep_pcu_get_constraint,
1805 .put_constraint = snbep_pcu_put_constraint,
/*
 * IvyTown PCU PMU type: SNB-EP register layout with the IVT event mask.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1808 static struct intel_uncore_type ivbep_uncore_pcu = {
1812 .perf_ctr_bits = 48,
1813 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1814 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1815 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1816 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1817 .num_shared_regs = 1,
1818 .ops = &ivbep_uncore_pcu_ops,
1819 .format_group = &ivbep_uncore_pcu_format_group,
/*
 * NULL-terminated list of IvyTown MSR-based uncore types.
 * NOTE(review): the array entries (presumably &ivbep_uncore_ubox,
 * &ivbep_uncore_cbox, &ivbep_uncore_pcu, NULL) were dropped by the
 * extraction — restore from the original file.
 */
1822 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1829 void ivbep_uncore_cpu_init(void)
1831 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1832 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1833 uncore_msr_uncores = ivbep_msr_uncores;
/*
 * IvyTown Home Agent PMU type (PCI-based).
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1836 static struct intel_uncore_type ivbep_uncore_ha = {
1840 .perf_ctr_bits = 48,
1841 IVBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * IvyTown IMC PMU type: PCI-based with a fixed DCLK counter; reuses the
 * SNB-EP predefined IMC events.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1844 static struct intel_uncore_type ivbep_uncore_imc = {
1848 .perf_ctr_bits = 48,
1849 .fixed_ctr_bits = 48,
1850 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1851 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1852 .event_descs = snbep_uncore_imc_events,
1853 IVBEP_UNCORE_PCI_COMMON_INIT(),
1856 /* registers in IRP boxes are not properly aligned */
/* Per-counter control and counter config-space offsets for the IRP box. */
1857 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1858 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1860 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1862 struct pci_dev *pdev = box->pci_dev;
1863 struct hw_perf_event *hwc = &event->hw;
1865 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1866 hwc->config | SNBEP_PMON_CTL_EN);
1869 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1871 struct pci_dev *pdev = box->pci_dev;
1872 struct hw_perf_event *hwc = &event->hw;
1874 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1877 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1879 struct pci_dev *pdev = box->pci_dev;
1880 struct hw_perf_event *hwc = &event->hw;
1883 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1884 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
/*
 * IRP box ops: IVT PCI init/box control with the IRP-specific per-counter
 * event accessors defined above.
 * NOTE(review): extraction dropped the closing "};".
 */
1889 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1890 .init_box = ivbep_uncore_pci_init_box,
1891 .disable_box = snbep_uncore_pci_disable_box,
1892 .enable_box = snbep_uncore_pci_enable_box,
1893 .disable_event = ivbep_uncore_irp_disable_event,
1894 .enable_event = ivbep_uncore_irp_enable_event,
1895 .read_counter = ivbep_uncore_irp_read_counter,
/*
 * IvyTown IRP (IIO ring port) PMU type: no fixed perf_ctr/event_ctl fields
 * because the IRP ops use the per-counter offset tables instead.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1898 static struct intel_uncore_type ivbep_uncore_irp = {
1902 .perf_ctr_bits = 48,
1903 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1904 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1905 .ops = &ivbep_uncore_irp_ops,
1906 .format_group = &ivbep_uncore_format_group,
/*
 * IvyTown QPI ops: IVT PCI init with the SNB-EP QPI match/mask filter
 * handling.
 * NOTE(review): extraction dropped the closing "};".
 */
1909 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1910 .init_box = ivbep_uncore_pci_init_box,
1911 .disable_box = snbep_uncore_pci_disable_box,
1912 .enable_box = snbep_uncore_pci_enable_box,
1913 .disable_event = snbep_uncore_pci_disable_event,
1914 .enable_event = snbep_qpi_enable_event,
1915 .read_counter = snbep_uncore_pci_read_counter,
1916 .hw_config = snbep_qpi_hw_config,
1917 .get_constraint = uncore_get_constraint,
1918 .put_constraint = uncore_put_constraint,
/*
 * IvyTown QPI link PMU type with extended event select and packet
 * match/mask filtering.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1921 static struct intel_uncore_type ivbep_uncore_qpi = {
1925 .perf_ctr_bits = 48,
1926 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1927 .event_ctl = SNBEP_PCI_PMON_CTL0,
1928 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1929 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1930 .num_shared_regs = 1,
1931 .ops = &ivbep_uncore_qpi_ops,
1932 .format_group = &ivbep_uncore_qpi_format_group,
/*
 * IvyTown R2PCIe PMU type; reuses the SNB-EP counter constraints.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1935 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1939 .perf_ctr_bits = 44,
1940 .constraints = snbep_uncore_r2pcie_constraints,
1941 IVBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * IvyTown R3QPI PMU type; reuses the SNB-EP counter constraints.
 * NOTE(review): extraction dropped .name/.num_counters/.num_boxes
 * initializers and the closing "};".
 */
1944 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1948 .perf_ctr_bits = 44,
1949 .constraints = snbep_uncore_r3qpi_constraints,
1950 IVBEP_UNCORE_PCI_COMMON_INIT(),
/*
 * Indices of the IvyTown PCI-based uncore types in ivbep_pci_uncores[].
 * NOTE(review): the opening "enum {" line was dropped by the extraction.
 */
1954 IVBEP_PCI_UNCORE_HA,
1955 IVBEP_PCI_UNCORE_IMC,
1956 IVBEP_PCI_UNCORE_IRP,
1957 IVBEP_PCI_UNCORE_QPI,
1958 IVBEP_PCI_UNCORE_R2PCIE,
1959 IVBEP_PCI_UNCORE_R3QPI,
/*
 * Table of IvyTown PCI-based uncore types, indexed by the enum above.
 * NOTE(review): extraction dropped the terminating NULL entry and "};".
 */
1962 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1963 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1964 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1965 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1966 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1967 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1968 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
/*
 * PCI match table mapping IvyTown uncore device IDs to (type, box index)
 * pairs.  0xe86/0xe96 are the QPI port filter devices, registered as extra
 * devices rather than PMUs.
 * NOTE(review): several "{ ... */ ... }" delimiter lines were dropped by
 * the extraction — verify the brace structure against the original file.
 */
1972 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1973 { /* Home Agent 0 */
1974 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1975 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1977 { /* Home Agent 1 */
1978 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1979 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1981 { /* MC0 Channel 0 */
1982 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1983 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1985 { /* MC0 Channel 1 */
1986 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1987 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1989 { /* MC0 Channel 3 */
1990 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1991 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1993 { /* MC0 Channel 4 */
1994 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1995 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1997 { /* MC1 Channel 0 */
1998 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1999 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
2001 { /* MC1 Channel 1 */
2002 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2003 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2005 { /* MC1 Channel 3 */
2006 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2007 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2009 { /* MC1 Channel 4 */
2010 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2011 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2014 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2015 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2018 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2019 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2022 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2023 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2026 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2027 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2030 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2031 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2033 { /* R3QPI0 Link 0 */
2034 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2035 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2037 { /* R3QPI0 Link 1 */
2038 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2039 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2041 { /* R3QPI1 Link 2 */
2042 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2043 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2045 { /* QPI Port 0 filter */
2046 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2047 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2048 SNBEP_PCI_QPI_PORT0_FILTER),
2050 { /* QPI Port 0 filter */
2051 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2052 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2053 SNBEP_PCI_QPI_PORT1_FILTER),
2055 { /* end: all zeroes */ }
/* PCI driver for IvyTown uncore devices; probe/remove hooks are supplied by the common uncore PCI core. */
2058 static struct pci_driver ivbep_uncore_pci_driver = {
2059 .name = "ivbep_uncore",
2060 .id_table = ivbep_uncore_pci_ids,
/* Register IvyTown PCI uncore PMUs: map PCI buses to sockets via the 0x0e1e ubox device, then publish the uncore table and driver. */
2063 int ivbep_uncore_pci_init(void)
2065 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2068 uncore_pci_uncores = ivbep_pci_uncores;
2069 uncore_pci_driver = &ivbep_uncore_pci_driver;
2072 /* end of IvyTown uncore support */
2074 /* KNL uncore support */
/* sysfs "format" attributes exposed for the KNL Ubox PMON (5-bit threshold variant). */
2075 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2076 &format_attr_event.attr,
2077 &format_attr_umask.attr,
2078 &format_attr_edge.attr,
2079 &format_attr_tid_en.attr,
2080 &format_attr_inv.attr,
2081 &format_attr_thresh5.attr,
/* Attribute group wrapping the Ubox format attributes for sysfs registration. */
2085 static const struct attribute_group knl_uncore_ubox_format_group = {
2087 .attrs = knl_uncore_ubox_formats_attr,
/* KNL Ubox PMU type: reuses the HSW-EP Ubox MSR layout with a KNL-specific raw event mask. */
2090 static struct intel_uncore_type knl_uncore_ubox = {
2094 .perf_ctr_bits = 48,
2095 .fixed_ctr_bits = 48,
2096 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2097 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2098 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2099 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2100 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2101 .ops = &snbep_uncore_msr_ops,
2102 .format_group = &knl_uncore_ubox_format_group,
/* sysfs "format" attributes for the KNL CHA PMON, including its config1 filter fields. */
2105 static struct attribute *knl_uncore_cha_formats_attr[] = {
2106 &format_attr_event.attr,
2107 &format_attr_umask.attr,
2108 &format_attr_qor.attr,
2109 &format_attr_edge.attr,
2110 &format_attr_tid_en.attr,
2111 &format_attr_inv.attr,
2112 &format_attr_thresh8.attr,
2113 &format_attr_filter_tid4.attr,
2114 &format_attr_filter_link3.attr,
2115 &format_attr_filter_state4.attr,
2116 &format_attr_filter_local.attr,
2117 &format_attr_filter_all_op.attr,
2118 &format_attr_filter_nnm.attr,
2119 &format_attr_filter_opc3.attr,
2120 &format_attr_filter_nc.attr,
2121 &format_attr_filter_isoc.attr,
/* Attribute group wrapping the CHA format attributes for sysfs registration. */
2125 static const struct attribute_group knl_uncore_cha_format_group = {
2127 .attrs = knl_uncore_cha_formats_attr,
/* KNL CHA scheduling constraints: events 0x11, 0x1f and 0x36 may only run on counter 0 (mask 0x1). */
2130 static struct event_constraint knl_uncore_cha_constraints[] = {
2131 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2132 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2133 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2134 EVENT_CONSTRAINT_END
/* Events that require the CHA filter registers; the third field selects which filter fields apply (consumed by knl_cha_filter_mask()). */
2137 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2138 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2139 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2140 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2141 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2142 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
/* Translate an extra_reg "fields" selector into the set of valid KNL CHA filter bits (TID/STATE/OP). */
2146 static u64 knl_cha_filter_mask(int fields)
2151 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2153 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2155 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
/* Delegate constraint lookup to the common C-box helper, parameterized by the KNL filter mask. */
2159 static struct event_constraint *
2160 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2162 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
/*
 * Per-event hardware configuration for the KNL CHA: if the event matches an
 * entry in knl_uncore_cha_extra_regs, point the extra register at this box's
 * FILTER0 MSR and derive its config from attr.config1 masked to the valid
 * filter fields, forcing the remote/local-node and NNC filter bits on.
 */
2165 static int knl_cha_hw_config(struct intel_uncore_box *box,
2166 struct perf_event *event)
2168 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2169 struct extra_reg *er;
/* Scan the extra-reg table for an entry whose event/config_mask matches this event. */
2172 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2173 if (er->event != (event->hw.config & er->config_mask))
/* Filter MSRs are laid out per box at KNL_CHA_MSR_OFFSET stride. */
2179 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2180 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2181 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2183 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2184 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2185 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
/* Forward declaration: KNL CHA reuses the HSW-EP C-box enable path (defined later in this file). */
2191 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2192 struct perf_event *event);
/* MSR-based ops vector for the KNL CHA PMU; generic SNB-EP helpers plus KNL-specific hw_config/constraint hooks. */
2194 static struct intel_uncore_ops knl_uncore_cha_ops = {
2195 .init_box = snbep_uncore_msr_init_box,
2196 .disable_box = snbep_uncore_msr_disable_box,
2197 .enable_box = snbep_uncore_msr_enable_box,
2198 .disable_event = snbep_uncore_msr_disable_event,
2199 .enable_event = hswep_cbox_enable_event,
2200 .read_counter = uncore_msr_read_counter,
2201 .hw_config = knl_cha_hw_config,
2202 .get_constraint = knl_cha_get_constraint,
2203 .put_constraint = snbep_cbox_put_constraint,
/* KNL CHA PMU type: HSW-EP C-box register layout with KNL event mask, offset and filter machinery. */
2206 static struct intel_uncore_type knl_uncore_cha = {
2210 .perf_ctr_bits = 48,
2211 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2212 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2213 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2214 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2215 .msr_offset = KNL_CHA_MSR_OFFSET,
2216 .num_shared_regs = 1,
2217 .constraints = knl_uncore_cha_constraints,
2218 .ops = &knl_uncore_cha_ops,
2219 .format_group = &knl_uncore_cha_format_group,
/* sysfs "format" attributes for the KNL PCU PMON, including occupancy sub-counter controls. */
2222 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2223 &format_attr_event2.attr,
2224 &format_attr_use_occ_ctr.attr,
2225 &format_attr_occ_sel.attr,
2226 &format_attr_edge.attr,
2227 &format_attr_tid_en.attr,
2228 &format_attr_inv.attr,
2229 &format_attr_thresh6.attr,
2230 &format_attr_occ_invert.attr,
2231 &format_attr_occ_edge_det.attr,
/* Attribute group wrapping the PCU format attributes for sysfs registration. */
2235 static const struct attribute_group knl_uncore_pcu_format_group = {
2237 .attrs = knl_uncore_pcu_formats_attr,
/* KNL PCU PMU type: HSW-EP PCU MSR layout with the KNL raw event mask and generic SNB-EP MSR ops. */
2240 static struct intel_uncore_type knl_uncore_pcu = {
2244 .perf_ctr_bits = 48,
2245 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2246 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2247 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2248 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2249 .ops = &snbep_uncore_msr_ops,
2250 .format_group = &knl_uncore_pcu_format_group,
/* Table of KNL MSR-based uncore PMU types (entries elided in this extract). */
2253 static struct intel_uncore_type *knl_msr_uncores[] = {
/* Publish the KNL MSR uncore table to the common uncore core. */
2260 void knl_uncore_cpu_init(void)
2262 uncore_msr_uncores = knl_msr_uncores;
/* Enable a KNL IMC box by clearing its PCI box-control register (writing 0 releases the freeze). */
2265 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2267 struct pci_dev *pdev = box->pci_dev;
2268 int box_ctl = uncore_pci_box_ctl(box);
2270 pci_write_config_dword(pdev, box_ctl, 0);
/*
 * Enable a KNL IMC event via PCI config space. The fixed (clockticks) event
 * needs the KNL-specific fixed-counter enable bit; all other events use the
 * standard SNB-EP enable bit.
 */
2273 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2274 struct perf_event *event)
2276 struct pci_dev *pdev = box->pci_dev;
2277 struct hw_perf_event *hwc = &event->hw;
2279 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2280 == UNCORE_FIXED_EVENT)
2281 pci_write_config_dword(pdev, hwc->config_base,
2282 hwc->config | KNL_PMON_FIXED_CTL_EN)
2284 pci_write_config_dword(pdev, hwc->config_base,
2285 hwc->config | SNBEP_PMON_CTL_EN);
/* PCI ops vector shared by all KNL IMC/EDC clock-domain PMUs; only box/event enable are KNL-specific. */
2288 static struct intel_uncore_ops knl_uncore_imc_ops = {
2289 .init_box = snbep_uncore_pci_init_box,
2290 .disable_box = snbep_uncore_pci_disable_box,
2291 .enable_box = knl_uncore_imc_enable_box,
2292 .read_counter = snbep_uncore_pci_read_counter,
2293 .enable_event = knl_uncore_imc_enable_event,
2294 .disable_event = snbep_uncore_pci_disable_event,
/* KNL memory controller UClk PMU (accessed through PCI config space despite the _MSR_ register names). */
2297 static struct intel_uncore_type knl_uncore_imc_uclk = {
2301 .perf_ctr_bits = 48,
2302 .fixed_ctr_bits = 48,
2303 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2304 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2305 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2306 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2307 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2308 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2309 .ops = &knl_uncore_imc_ops,
2310 .format_group = &snbep_uncore_format_group,
/* KNL memory controller DClk (per-channel) PMU; same ops as the UClk variant, different register base. */
2313 static struct intel_uncore_type knl_uncore_imc_dclk = {
2317 .perf_ctr_bits = 48,
2318 .fixed_ctr_bits = 48,
2319 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2320 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2321 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2322 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2323 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2324 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2325 .ops = &knl_uncore_imc_ops,
2326 .format_group = &snbep_uncore_format_group,
/* KNL EDC (embedded DRAM controller) UClk PMU; shares the UClk register layout with the IMC UClk type. */
2329 static struct intel_uncore_type knl_uncore_edc_uclk = {
2333 .perf_ctr_bits = 48,
2334 .fixed_ctr_bits = 48,
2335 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2336 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2337 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2338 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2339 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2340 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2341 .ops = &knl_uncore_imc_ops,
2342 .format_group = &snbep_uncore_format_group,
/* KNL EDC EClk PMU; ECLK-domain register base, otherwise identical structure to the other clock PMUs. */
2345 static struct intel_uncore_type knl_uncore_edc_eclk = {
2349 .perf_ctr_bits = 48,
2350 .fixed_ctr_bits = 48,
2351 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2352 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2353 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2354 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2355 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2356 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2357 .ops = &knl_uncore_imc_ops,
2358 .format_group = &snbep_uncore_format_group,
/* KNL M2PCIe constraint: event 0x23 restricted to counters 0-1 (mask 0x3). */
2361 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2362 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2363 EVENT_CONSTRAINT_END
/* KNL M2PCIe PMU type; common SNB-EP PCI PMON fields come from the init macro. */
2366 static struct intel_uncore_type knl_uncore_m2pcie = {
2370 .perf_ctr_bits = 48,
2371 .constraints = knl_uncore_m2pcie_constraints,
2372 SNBEP_UNCORE_PCI_COMMON_INIT(),
/* sysfs "format" attributes for the KNL IRP PMON. */
2375 static struct attribute *knl_uncore_irp_formats_attr[] = {
2376 &format_attr_event.attr,
2377 &format_attr_umask.attr,
2378 &format_attr_qor.attr,
2379 &format_attr_edge.attr,
2380 &format_attr_inv.attr,
2381 &format_attr_thresh8.attr,
/* Attribute group wrapping the IRP format attributes for sysfs registration. */
2385 static const struct attribute_group knl_uncore_irp_format_group = {
2387 .attrs = knl_uncore_irp_formats_attr,
/* KNL IRP PMU type: SNB-EP PCI counter/control registers with KNL-specific event mask and box control. */
2390 static struct intel_uncore_type knl_uncore_irp = {
2394 .perf_ctr_bits = 48,
2395 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2396 .event_ctl = SNBEP_PCI_PMON_CTL0,
2397 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2398 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2399 .ops = &snbep_uncore_pci_ops,
2400 .format_group = &knl_uncore_irp_format_group,
/* Index constants for the KNL PCI uncore table below (enum opener elided in this extract). */
2404 KNL_PCI_UNCORE_MC_UCLK,
2405 KNL_PCI_UNCORE_MC_DCLK,
2406 KNL_PCI_UNCORE_EDC_UCLK,
2407 KNL_PCI_UNCORE_EDC_ECLK,
2408 KNL_PCI_UNCORE_M2PCIE,
/* KNL PCI uncore table, indexed by the enum above; consumed by the common uncore PCI core. */
2412 static struct intel_uncore_type *knl_pci_uncores[] = {
2413 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2414 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2415 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2416 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2417 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2418 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2423 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2424 device type. Prior to KNL, each instance of a PMU device type had a unique
2427 * PCI Device ID Uncore PMU Devices
2428 * ----------------------------------
2429 * 0x7841 MC0 UClk, MC1 UClk
2430 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2431 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2432 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2433 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2434 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2435 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
/*
 * KNL PCI ID table. Because one device ID covers many PMU instances (see the
 * comment above), each entry also encodes the expected PCI device/function
 * via UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx).
 */
2440 static const struct pci_device_id knl_uncore_pci_ids[] = {
2442 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2443 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2446 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2447 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2449 { /* MC0 DClk CH 0 */
2450 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2451 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2453 { /* MC0 DClk CH 1 */
2454 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2455 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2457 { /* MC0 DClk CH 2 */
2458 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2459 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2461 { /* MC1 DClk CH 0 */
2462 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2463 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2465 { /* MC1 DClk CH 1 */
2466 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2467 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2469 { /* MC1 DClk CH 2 */
2470 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2471 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2474 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2475 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2478 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2479 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2482 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2483 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2486 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2487 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2490 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2491 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2494 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2495 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2498 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2499 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2502 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2503 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2506 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2507 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2510 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2511 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2514 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2515 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2518 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2519 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2522 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2523 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2526 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2527 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2530 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2531 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2534 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2535 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2538 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2539 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2542 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2543 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2545 { /* end: all zeroes */ }
/* PCI driver for KNL uncore devices; probe/remove hooks are supplied by the common uncore PCI core. */
2548 static struct pci_driver knl_uncore_pci_driver = {
2549 .name = "knl_uncore",
2550 .id_table = knl_uncore_pci_ids,
/* Register KNL PCI uncore PMUs; bus-to-socket mapping is probed via the IRP and M2PCIe device IDs. */
2553 int knl_uncore_pci_init(void)
2557 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2558 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2561 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2564 uncore_pci_uncores = knl_pci_uncores;
2565 uncore_pci_driver = &knl_uncore_pci_driver;
2569 /* end of KNL uncore support */
2571 /* Haswell-EP uncore support */
/* sysfs "format" attributes for the HSW-EP Ubox PMON, including the tid/cid filter fields. */
2572 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2573 &format_attr_event.attr,
2574 &format_attr_umask.attr,
2575 &format_attr_edge.attr,
2576 &format_attr_inv.attr,
2577 &format_attr_thresh5.attr,
2578 &format_attr_filter_tid2.attr,
2579 &format_attr_filter_cid.attr,
/* Attribute group wrapping the Ubox format attributes for sysfs registration. */
2583 static const struct attribute_group hswep_uncore_ubox_format_group = {
2585 .attrs = hswep_uncore_ubox_formats_attr,
/* HSW-EP Ubox per-event config: route attr.config1 (masked to valid filter bits) to the Ubox filter MSR. */
2588 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2590 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2591 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2592 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
/* HSW-EP Ubox ops: common SNB-EP MSR ops plus the Ubox filter hw_config and generic shared-reg constraints. */
2597 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2598 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2599 .hw_config = hswep_ubox_hw_config,
2600 .get_constraint = uncore_get_constraint,
2601 .put_constraint = uncore_put_constraint,
/* HSW-EP Ubox PMU type: 44-bit general counters, 48-bit fixed UCLK counter, one shared (filter) register. */
2604 static struct intel_uncore_type hswep_uncore_ubox = {
2608 .perf_ctr_bits = 44,
2609 .fixed_ctr_bits = 48,
2610 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2611 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2612 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2613 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2614 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2615 .num_shared_regs = 1,
2616 .ops = &hswep_uncore_ubox_ops,
2617 .format_group = &hswep_uncore_ubox_format_group,
/* sysfs "format" attributes for the HSW-EP C-box PMON, including its config1 filter fields. */
2620 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2621 &format_attr_event.attr,
2622 &format_attr_umask.attr,
2623 &format_attr_edge.attr,
2624 &format_attr_tid_en.attr,
2625 &format_attr_thresh8.attr,
2626 &format_attr_filter_tid3.attr,
2627 &format_attr_filter_link2.attr,
2628 &format_attr_filter_state3.attr,
2629 &format_attr_filter_nid2.attr,
2630 &format_attr_filter_opc2.attr,
2631 &format_attr_filter_nc.attr,
2632 &format_attr_filter_c6.attr,
2633 &format_attr_filter_isoc.attr,
/* Attribute group wrapping the C-box format attributes for sysfs registration. */
2637 static const struct attribute_group hswep_uncore_cbox_format_group = {
2639 .attrs = hswep_uncore_cbox_formats_attr,
/* HSW-EP C-box counter constraints: most listed events are restricted to counter 0; 0x38 may use counters 0-1. */
2642 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2643 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2644 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2645 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2646 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2647 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2648 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2649 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2650 EVENT_CONSTRAINT_END
/*
 * HSW-EP C-box events (event|umask matched under config_mask) that need the
 * filter registers; the last field is the filter-field selector consumed by
 * hswep_cbox_filter_mask().
 */
2653 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2654 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2655 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2656 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2657 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2658 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2659 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2660 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2661 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2662 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2663 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2664 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2665 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2666 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2667 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2668 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2669 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2670 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2671 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2672 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2673 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2674 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2675 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2676 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2677 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2678 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2679 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2680 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2681 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2682 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2683 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2684 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2685 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2686 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2687 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2688 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2689 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2690 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2691 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
/*
 * Translate an extra_reg "fields" selector into the set of valid HSW-EP C-box
 * filter bits; selector bit 0x10 enables the OPC/NC/C6/ISOC group together.
 */
2695 static u64 hswep_cbox_filter_mask(int fields)
2699 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2701 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2703 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2705 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2706 if (fields & 0x10) {
2707 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2708 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2709 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2710 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
/* Delegate constraint lookup to the common C-box helper, parameterized by the HSW-EP filter mask. */
2715 static struct event_constraint *
2716 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2718 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
/*
 * Per-event hardware configuration for the HSW-EP C-box: if the event matches
 * an entry in hswep_uncore_cbox_extra_regs, target this box's FILTER0 MSR and
 * mask attr.config1 to that entry's valid filter fields.
 */
2721 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2723 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2724 struct extra_reg *er;
/* Scan the extra-reg table for an entry whose event/config_mask matches this event. */
2727 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2728 if (er->event != (event->hw.config & er->config_mask))
/* Filter MSRs are laid out per box at HSWEP_CBO_MSR_OFFSET stride. */
2734 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2735 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2736 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
/*
 * Enable a C-box event: if it carries an extra (filter) register, the 64-bit
 * shared filter value is written as two adjacent 32-bit MSR writes before the
 * event control register is armed with the enable bit.
 */
2742 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2743 struct perf_event *event)
2745 struct hw_perf_event *hwc = &event->hw;
2746 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2748 if (reg1->idx != EXTRA_REG_NONE) {
2749 u64 filter = uncore_shared_reg_config(box, 0);
2750 wrmsrl(reg1->reg, filter & 0xffffffff);
2751 wrmsrl(reg1->reg + 1, filter >> 32);
2754 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
/* MSR-based ops vector for the HSW-EP C-box PMU; generic SNB-EP helpers plus HSW-EP filter hooks. */
2757 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2758 .init_box = snbep_uncore_msr_init_box,
2759 .disable_box = snbep_uncore_msr_disable_box,
2760 .enable_box = snbep_uncore_msr_enable_box,
2761 .disable_event = snbep_uncore_msr_disable_event,
2762 .enable_event = hswep_cbox_enable_event,
2763 .read_counter = uncore_msr_read_counter,
2764 .hw_config = hswep_cbox_hw_config,
2765 .get_constraint = hswep_cbox_get_constraint,
2766 .put_constraint = snbep_cbox_put_constraint,
/* HSW-EP C-box PMU type: one box per core slice, one shared filter register per box. */
2769 static struct intel_uncore_type hswep_uncore_cbox = {
2773 .perf_ctr_bits = 48,
2774 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2775 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2776 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2777 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2778 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2779 .num_shared_regs = 1,
2780 .constraints = hswep_uncore_cbox_constraints,
2781 .ops = &hswep_uncore_cbox_ops,
2782 .format_group = &hswep_uncore_cbox_format_group,
2786 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2788 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2790 unsigned msr = uncore_msr_box_ctl(box);
2793 u64 init = SNBEP_PMON_BOX_CTL_INT;
/* Accumulate the init mask one set bit at a time, writing after each bit (per the comment above). */
2797 for_each_set_bit(i, (unsigned long *)&init, 64) {
2798 flags |= (1ULL << i);
/* SBOX ops: common SNB-EP MSR ops with init_box overridden by the bit-by-bit variant above. */
2804 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2805 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2806 .init_box = hswep_uncore_sbox_msr_init_box
/* sysfs "format" attributes for the HSW-EP SBOX PMON. */
2809 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2810 &format_attr_event.attr,
2811 &format_attr_umask.attr,
2812 &format_attr_edge.attr,
2813 &format_attr_tid_en.attr,
2814 &format_attr_inv.attr,
2815 &format_attr_thresh8.attr,
/* Attribute group wrapping the SBOX format attributes for sysfs registration. */
2819 static const struct attribute_group hswep_uncore_sbox_format_group = {
2821 .attrs = hswep_uncore_sbox_formats_attr,
/* HSW-EP SBOX PMU type: 44-bit counters; num_boxes may be reduced in hswep_uncore_cpu_init(). */
2824 static struct intel_uncore_type hswep_uncore_sbox = {
2828 .perf_ctr_bits = 44,
2829 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2830 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2831 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2832 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2833 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2834 .ops = &hswep_uncore_sbox_msr_ops,
2835 .format_group = &hswep_uncore_sbox_format_group,
/*
 * HSW-EP PCU per-event config: occupancy events 0xb-0xe each own one byte of
 * the PCU filter MSR; select that byte from attr.config1.
 */
2838 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2840 struct hw_perf_event *hwc = &event->hw;
2841 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2842 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2844 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2845 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2846 reg1->idx = ev_sel - 0xb;
2847 reg1->config = event->attr.config1 & (0xff << reg1->idx);
/* HSW-EP PCU ops: common SNB-EP MSR ops plus the PCU filter hw_config and SNB-EP PCU constraint hooks. */
2852 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2853 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2854 .hw_config = hswep_pcu_hw_config,
2855 .get_constraint = snbep_pcu_get_constraint,
2856 .put_constraint = snbep_pcu_put_constraint,
/* HSW-EP PCU PMU type; reuses the SNB-EP PCU format group and one shared (filter) register. */
2859 static struct intel_uncore_type hswep_uncore_pcu = {
2863 .perf_ctr_bits = 48,
2864 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2865 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2866 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2867 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2868 .num_shared_regs = 1,
2869 .ops = &hswep_uncore_pcu_ops,
2870 .format_group = &snbep_uncore_pcu_format_group,
/* Table of HSW-EP MSR-based uncore PMU types (entries elided in this extract). */
2873 static struct intel_uncore_type *hswep_msr_uncores[] = {
/* PCU PCI device ID and CAPID4 offset used to detect chop (die configuration); chop lives in bits [7:6]. */
2881 #define HSWEP_PCU_DID 0x2fc0
2882 #define HSWEP_PCU_CAPID4_OFFET 0x94
2883 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
/* Read CAPID4 from the PCU PCI device; a zero chop value indicates a part with a reduced SBOX count. */
2885 static bool hswep_has_limit_sbox(unsigned int device)
2887 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2893 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2894 if (!hswep_get_chop(capid4))
/* Clamp the C-box count to the actual core count, trim SBOXes on chopped parts, and publish the MSR uncore table. */
2900 void hswep_uncore_cpu_init(void)
2902 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2903 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2905 /* Detect 6-8 core systems with only two SBOXes */
2906 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2907 hswep_uncore_sbox.num_boxes = 2;
2909 uncore_msr_uncores = hswep_msr_uncores;
/* HSW-EP Home Agent PMU type; common SNB-EP PCI PMON fields come from the init macro. */
2912 static struct intel_uncore_type hswep_uncore_ha = {
2916 .perf_ctr_bits = 48,
2917 SNBEP_UNCORE_PCI_COMMON_INIT(),
/* Predefined IMC events; CAS count scale 6.103515625e-5 converts 64-byte line counts to MiB. */
2920 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2921 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2922 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2923 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2924 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2925 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2926 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2927 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2928 { /* end: all zeroes */ },
/* HSW-EP IMC PMU type: adds a fixed DCLK counter and the predefined events above to the common PCI PMON layout. */
2931 static struct intel_uncore_type hswep_uncore_imc = {
2935 .perf_ctr_bits = 48,
2936 .fixed_ctr_bits = 48,
2937 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2938 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2939 .event_descs = hswep_uncore_imc_events,
2940 SNBEP_UNCORE_PCI_COMMON_INIT(),
/* PCI config-space offsets of the four IRP counters, indexed by hwc->idx. */
2943 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
/* Read a 64-bit IRP counter as two 32-bit PCI config reads (low dword then high dword). */
2945 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2947 struct pci_dev *pdev = box->pci_dev;
2948 struct hw_perf_event *hwc = &event->hw;
2951 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2952 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
/* IRP ops: SNB-EP PCI box control, IvyTown-style event enable/disable, HSW-EP counter read. */
2957 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2958 .init_box = snbep_uncore_pci_init_box,
2959 .disable_box = snbep_uncore_pci_disable_box,
2960 .enable_box = snbep_uncore_pci_enable_box,
2961 .disable_event = ivbep_uncore_irp_disable_event,
2962 .enable_event = ivbep_uncore_irp_enable_event,
2963 .read_counter = hswep_uncore_irp_read_counter,
/* HSW-EP IRP PMU type; no perf_ctr/event_ctl fields here since the custom ops access counters directly. */
2966 static struct intel_uncore_type hswep_uncore_irp = {
2970 .perf_ctr_bits = 48,
2971 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2972 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2973 .ops = &hswep_uncore_irp_ops,
2974 .format_group = &snbep_uncore_format_group,
/* HSW-EP QPI PMU type: reuses the SNB-EP QPI ops/format group and one shared (match/mask) register. */
2977 static struct intel_uncore_type hswep_uncore_qpi = {
2981 .perf_ctr_bits = 48,
2982 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2983 .event_ctl = SNBEP_PCI_PMON_CTL0,
2984 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2985 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2986 .num_shared_regs = 1,
2987 .ops = &snbep_uncore_qpi_ops,
2988 .format_group = &snbep_uncore_qpi_format_group,
/* HSW-EP R2PCIe counter constraints (mask 0x1 = counter 0 only, 0x3 = counters 0-1). */
2991 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2992 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2993 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2994 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2995 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2996 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2997 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2998 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2999 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
3000 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3001 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3002 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3003 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3004 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3005 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3006 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3007 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3008 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3009 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3010 EVENT_CONSTRAINT_END
/* HSW-EP R2PCIe PMU type; common SNB-EP PCI PMON fields come from the init macro. */
3013 static struct intel_uncore_type hswep_uncore_r2pcie = {
3017 .perf_ctr_bits = 48,
3018 .constraints = hswep_uncore_r2pcie_constraints,
3019 SNBEP_UNCORE_PCI_COMMON_INIT(),
/* HSW-EP R3QPI counter constraints (mask 0x7 = counters 0-2, 0x3 = counters 0-1, 0x1 = counter 0). */
3022 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3023 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3024 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3025 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3026 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3027 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3028 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3029 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3030 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3031 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3032 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3033 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3034 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3035 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3036 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3037 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3038 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3039 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3040 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3041 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3042 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3043 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3044 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3045 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3046 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3047 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3048 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3049 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3050 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3051 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3052 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3053 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3054 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3055 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3056 EVENT_CONSTRAINT_END
/* HSW-EP R3QPI PMU type: 44-bit counters; common SNB-EP PCI PMON fields come from the init macro. */
3059 static struct intel_uncore_type hswep_uncore_r3qpi = {
3063 .perf_ctr_bits = 44,
3064 .constraints = hswep_uncore_r3qpi_constraints,
3065 SNBEP_UNCORE_PCI_COMMON_INIT(),
/* Index constants for the HSW-EP PCI uncore table below (enum opener elided in this extract). */
3069 HSWEP_PCI_UNCORE_HA,
3070 HSWEP_PCI_UNCORE_IMC,
3071 HSWEP_PCI_UNCORE_IRP,
3072 HSWEP_PCI_UNCORE_QPI,
3073 HSWEP_PCI_UNCORE_R2PCIE,
3074 HSWEP_PCI_UNCORE_R3QPI,
/* HSW-EP PCI uncore table, indexed by the enum above; consumed by the common uncore PCI core. */
3077 static struct intel_uncore_type *hswep_pci_uncores[] = {
3078 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
3079 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
3080 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
3081 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
3082 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
3083 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
/*
 * HSW-EP PCI ID table: each entry maps a device ID to an uncore PMU type and
 * instance index; the trailing 0x2f86/0x2f96 entries are the QPI port filter
 * devices tracked as extra PCI devices, not PMUs.
 */
3087 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3088 { /* Home Agent 0 */
3089 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3090 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3092 { /* Home Agent 1 */
3093 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3094 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3096 { /* MC0 Channel 0 */
3097 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3098 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3100 { /* MC0 Channel 1 */
3101 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3102 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3104 { /* MC0 Channel 2 */
3105 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3106 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3108 { /* MC0 Channel 3 */
3109 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3110 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3112 { /* MC1 Channel 0 */
3113 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3114 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3116 { /* MC1 Channel 1 */
3117 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3118 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3120 { /* MC1 Channel 2 */
3121 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3122 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3124 { /* MC1 Channel 3 */
3125 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3126 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3130 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3133 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3134 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3137 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3138 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3141 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3142 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3145 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3146 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3148 { /* R3QPI0 Link 0 */
3149 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3150 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3152 { /* R3QPI0 Link 1 */
3153 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3154 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3156 { /* R3QPI1 Link 2 */
3157 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3158 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3160 { /* QPI Port 0 filter */
3161 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3162 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3163 SNBEP_PCI_QPI_PORT0_FILTER),
3165 { /* QPI Port 1 filter */
3166 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3167 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3168 SNBEP_PCI_QPI_PORT1_FILTER),
3170 { /* end: all zeroes */ }
3173 static struct pci_driver hswep_uncore_pci_driver = {
3174 .name = "hswep_uncore",
3175 .id_table = hswep_uncore_pci_ids,
3178 int hswep_uncore_pci_init(void)
3180 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3183 uncore_pci_uncores = hswep_pci_uncores;
3184 uncore_pci_driver = &hswep_uncore_pci_driver;
3187 /* end of Haswell-EP uncore support */
3189 /* BDX uncore support */
3191 static struct intel_uncore_type bdx_uncore_ubox = {
3195 .perf_ctr_bits = 48,
3196 .fixed_ctr_bits = 48,
3197 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3198 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3199 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3200 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3201 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3202 .num_shared_regs = 1,
3203 .ops = &ivbep_uncore_msr_ops,
3204 .format_group = &ivbep_uncore_ubox_format_group,
3207 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3208 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3209 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3210 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3211 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3212 EVENT_CONSTRAINT_END
3215 static struct intel_uncore_type bdx_uncore_cbox = {
3219 .perf_ctr_bits = 48,
3220 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3221 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3222 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3223 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3224 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3225 .num_shared_regs = 1,
3226 .constraints = bdx_uncore_cbox_constraints,
3227 .ops = &hswep_uncore_cbox_ops,
3228 .format_group = &hswep_uncore_cbox_format_group,
3231 static struct intel_uncore_type bdx_uncore_sbox = {
3235 .perf_ctr_bits = 48,
3236 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3237 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3238 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3239 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3240 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3241 .ops = &hswep_uncore_sbox_msr_ops,
3242 .format_group = &hswep_uncore_sbox_format_group,
3245 #define BDX_MSR_UNCORE_SBOX 3
3247 static struct intel_uncore_type *bdx_msr_uncores[] = {
3255 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3256 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3257 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3258 EVENT_CONSTRAINT_END
3261 #define BDX_PCU_DID 0x6fc0
3263 void bdx_uncore_cpu_init(void)
3265 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3266 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3267 uncore_msr_uncores = bdx_msr_uncores;
3269 /* Detect systems with no SBOXes */
3270 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3271 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3273 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3276 static struct intel_uncore_type bdx_uncore_ha = {
3280 .perf_ctr_bits = 48,
3281 SNBEP_UNCORE_PCI_COMMON_INIT(),
3284 static struct intel_uncore_type bdx_uncore_imc = {
3288 .perf_ctr_bits = 48,
3289 .fixed_ctr_bits = 48,
3290 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3291 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3292 .event_descs = hswep_uncore_imc_events,
3293 SNBEP_UNCORE_PCI_COMMON_INIT(),
3296 static struct intel_uncore_type bdx_uncore_irp = {
3300 .perf_ctr_bits = 48,
3301 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3302 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3303 .ops = &hswep_uncore_irp_ops,
3304 .format_group = &snbep_uncore_format_group,
3307 static struct intel_uncore_type bdx_uncore_qpi = {
3311 .perf_ctr_bits = 48,
3312 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3313 .event_ctl = SNBEP_PCI_PMON_CTL0,
3314 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3315 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3316 .num_shared_regs = 1,
3317 .ops = &snbep_uncore_qpi_ops,
3318 .format_group = &snbep_uncore_qpi_format_group,
3321 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3322 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3323 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3324 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3325 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3326 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3327 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3328 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3329 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3330 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3331 EVENT_CONSTRAINT_END
3334 static struct intel_uncore_type bdx_uncore_r2pcie = {
3338 .perf_ctr_bits = 48,
3339 .constraints = bdx_uncore_r2pcie_constraints,
3340 SNBEP_UNCORE_PCI_COMMON_INIT(),
3343 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3344 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3345 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3346 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3347 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3348 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3349 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3350 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3351 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3352 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3353 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3354 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3355 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3356 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3357 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3358 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3359 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3360 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3361 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3362 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3363 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3364 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3365 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3366 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3367 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3368 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3369 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3370 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3371 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3372 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3373 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3374 EVENT_CONSTRAINT_END
3377 static struct intel_uncore_type bdx_uncore_r3qpi = {
3381 .perf_ctr_bits = 48,
3382 .constraints = bdx_uncore_r3qpi_constraints,
3383 SNBEP_UNCORE_PCI_COMMON_INIT(),
3391 BDX_PCI_UNCORE_R2PCIE,
3392 BDX_PCI_UNCORE_R3QPI,
3395 static struct intel_uncore_type *bdx_pci_uncores[] = {
3396 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3397 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3398 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3399 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3400 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3401 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3405 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3406 { /* Home Agent 0 */
3407 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3408 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3410 { /* Home Agent 1 */
3411 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3412 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3414 { /* MC0 Channel 0 */
3415 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3416 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3418 { /* MC0 Channel 1 */
3419 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3420 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3422 { /* MC0 Channel 2 */
3423 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3424 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3426 { /* MC0 Channel 3 */
3427 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3428 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3430 { /* MC1 Channel 0 */
3431 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3432 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3434 { /* MC1 Channel 1 */
3435 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3436 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3438 { /* MC1 Channel 2 */
3439 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3440 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3442 { /* MC1 Channel 3 */
3443 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3444 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3447 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3448 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3451 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3452 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3455 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3456 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3459 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3460 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3463 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3464 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3466 { /* R3QPI0 Link 0 */
3467 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3468 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3470 { /* R3QPI0 Link 1 */
3471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3472 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3474 { /* R3QPI1 Link 2 */
3475 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3476 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3478 { /* QPI Port 0 filter */
3479 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3480 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3481 SNBEP_PCI_QPI_PORT0_FILTER),
3483 { /* QPI Port 1 filter */
3484 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3485 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3486 SNBEP_PCI_QPI_PORT1_FILTER),
3488 { /* QPI Port 2 filter */
3489 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3490 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3491 BDX_PCI_QPI_PORT2_FILTER),
3493 { /* end: all zeroes */ }
3496 static struct pci_driver bdx_uncore_pci_driver = {
3497 .name = "bdx_uncore",
3498 .id_table = bdx_uncore_pci_ids,
3501 int bdx_uncore_pci_init(void)
3503 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3507 uncore_pci_uncores = bdx_pci_uncores;
3508 uncore_pci_driver = &bdx_uncore_pci_driver;
3512 /* end of BDX uncore support */
3514 /* SKX uncore support */
3516 static struct intel_uncore_type skx_uncore_ubox = {
3520 .perf_ctr_bits = 48,
3521 .fixed_ctr_bits = 48,
3522 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3523 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3524 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3525 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3526 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3527 .ops = &ivbep_uncore_msr_ops,
3528 .format_group = &ivbep_uncore_ubox_format_group,
3531 static struct attribute *skx_uncore_cha_formats_attr[] = {
3532 &format_attr_event.attr,
3533 &format_attr_umask.attr,
3534 &format_attr_edge.attr,
3535 &format_attr_tid_en.attr,
3536 &format_attr_inv.attr,
3537 &format_attr_thresh8.attr,
3538 &format_attr_filter_tid4.attr,
3539 &format_attr_filter_state5.attr,
3540 &format_attr_filter_rem.attr,
3541 &format_attr_filter_loc.attr,
3542 &format_attr_filter_nm.attr,
3543 &format_attr_filter_all_op.attr,
3544 &format_attr_filter_not_nm.attr,
3545 &format_attr_filter_opc_0.attr,
3546 &format_attr_filter_opc_1.attr,
3547 &format_attr_filter_nc.attr,
3548 &format_attr_filter_isoc.attr,
3552 static const struct attribute_group skx_uncore_chabox_format_group = {
3554 .attrs = skx_uncore_cha_formats_attr,
3557 static struct event_constraint skx_uncore_chabox_constraints[] = {
3558 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3559 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3560 EVENT_CONSTRAINT_END
3563 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3564 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3565 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3566 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3567 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3568 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3569 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3570 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3571 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3572 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3576 static u64 skx_cha_filter_mask(int fields)
3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3583 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3585 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3587 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3588 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3589 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3590 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3591 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3592 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3593 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3594 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3595 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3600 static struct event_constraint *
3601 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3603 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3606 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3608 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3609 struct extra_reg *er;
3612 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3613 if (er->event != (event->hw.config & er->config_mask))
3619 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3620 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3621 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3627 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3628 /* There is no frz_en for chabox ctl */
3629 .init_box = ivbep_uncore_msr_init_box,
3630 .disable_box = snbep_uncore_msr_disable_box,
3631 .enable_box = snbep_uncore_msr_enable_box,
3632 .disable_event = snbep_uncore_msr_disable_event,
3633 .enable_event = hswep_cbox_enable_event,
3634 .read_counter = uncore_msr_read_counter,
3635 .hw_config = skx_cha_hw_config,
3636 .get_constraint = skx_cha_get_constraint,
3637 .put_constraint = snbep_cbox_put_constraint,
3640 static struct intel_uncore_type skx_uncore_chabox = {
3643 .perf_ctr_bits = 48,
3644 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3645 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3646 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3647 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3648 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3649 .num_shared_regs = 1,
3650 .constraints = skx_uncore_chabox_constraints,
3651 .ops = &skx_uncore_chabox_ops,
3652 .format_group = &skx_uncore_chabox_format_group,
3655 static struct attribute *skx_uncore_iio_formats_attr[] = {
3656 &format_attr_event.attr,
3657 &format_attr_umask.attr,
3658 &format_attr_edge.attr,
3659 &format_attr_inv.attr,
3660 &format_attr_thresh9.attr,
3661 &format_attr_ch_mask.attr,
3662 &format_attr_fc_mask.attr,
3666 static const struct attribute_group skx_uncore_iio_format_group = {
3668 .attrs = skx_uncore_iio_formats_attr,
3671 static struct event_constraint skx_uncore_iio_constraints[] = {
3672 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3673 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3674 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3675 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3676 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3677 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3678 EVENT_CONSTRAINT_END
3681 static void skx_iio_enable_event(struct intel_uncore_box *box,
3682 struct perf_event *event)
3684 struct hw_perf_event *hwc = &event->hw;
3686 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3689 static struct intel_uncore_ops skx_uncore_iio_ops = {
3690 .init_box = ivbep_uncore_msr_init_box,
3691 .disable_box = snbep_uncore_msr_disable_box,
3692 .enable_box = snbep_uncore_msr_enable_box,
3693 .disable_event = snbep_uncore_msr_disable_event,
3694 .enable_event = skx_iio_enable_event,
3695 .read_counter = uncore_msr_read_counter,
3698 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3700 return pmu->type->topology[die].configuration >>
3701 (pmu->pmu_idx * BUS_NUM_STRIDE);
3705 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3706 int die, int zero_bus_pmu)
3708 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3710 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3714 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3716 /* Root bus 0x00 is valid only for pmu_idx = 0. */
3717 return pmu_iio_mapping_visible(kobj, attr, die, 0);
3720 static ssize_t skx_iio_mapping_show(struct device *dev,
3721 struct device_attribute *attr, char *buf)
3723 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3724 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3725 long die = (long)ea->var;
3727 return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3728 skx_iio_stack(pmu, die));
3731 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3735 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3736 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3739 *topology = msr_value;
3744 static int die_to_cpu(int die)
3746 int res = 0, cpu, current_die;
3748 * Using cpus_read_lock() to ensure cpu is not going down between
3749 * looking at cpu_online_mask.
3752 for_each_online_cpu(cpu) {
3753 current_die = topology_logical_die_id(cpu);
3754 if (current_die == die) {
3763 static int skx_iio_get_topology(struct intel_uncore_type *type)
3765 int die, ret = -EPERM;
3767 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3769 if (!type->topology)
3772 for (die = 0; die < uncore_max_dies(); die++) {
3773 ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3774 &type->topology[die].configuration);
3778 ret = uncore_die_to_segment(die);
3782 type->topology[die].segment = ret;
3786 kfree(type->topology);
3787 type->topology = NULL;
3793 static struct attribute_group skx_iio_mapping_group = {
3794 .is_visible = skx_iio_mapping_visible,
3797 static const struct attribute_group *skx_iio_attr_update[] = {
3798 &skx_iio_mapping_group,
3803 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3808 struct attribute **attrs = NULL;
3809 struct dev_ext_attribute *eas = NULL;
3811 ret = type->get_topology(type);
3813 goto clear_attr_update;
3817 /* One more for NULL. */
3818 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3820 goto clear_topology;
3822 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3826 for (die = 0; die < uncore_max_dies(); die++) {
3827 sprintf(buf, "die%ld", die);
3828 sysfs_attr_init(&eas[die].attr.attr);
3829 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3830 if (!eas[die].attr.attr.name)
3832 eas[die].attr.attr.mode = 0444;
3833 eas[die].attr.show = skx_iio_mapping_show;
3834 eas[die].attr.store = NULL;
3835 eas[die].var = (void *)die;
3836 attrs[die] = &eas[die].attr.attr;
3842 for (; die >= 0; die--)
3843 kfree(eas[die].attr.attr.name);
3848 kfree(type->topology);
3850 type->attr_update = NULL;
3855 pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3857 struct attribute **attr = ag->attrs;
3862 for (; *attr; attr++)
3863 kfree((*attr)->name);
3864 kfree(attr_to_ext_attr(*ag->attrs));
3867 kfree(type->topology);
3870 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3872 return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
3875 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3877 pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
3880 static struct intel_uncore_type skx_uncore_iio = {
3884 .perf_ctr_bits = 48,
3885 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3886 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
3887 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
3888 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3889 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
3890 .msr_offset = SKX_IIO_MSR_OFFSET,
3891 .constraints = skx_uncore_iio_constraints,
3892 .ops = &skx_uncore_iio_ops,
3893 .format_group = &skx_uncore_iio_format_group,
3894 .attr_update = skx_iio_attr_update,
3895 .get_topology = skx_iio_get_topology,
3896 .set_mapping = skx_iio_set_mapping,
3897 .cleanup_mapping = skx_iio_cleanup_mapping,
3900 enum perf_uncore_iio_freerunning_type_id {
3901 SKX_IIO_MSR_IOCLK = 0,
3903 SKX_IIO_MSR_UTIL = 2,
3905 SKX_IIO_FREERUNNING_TYPE_MAX,
3909 static struct freerunning_counters skx_iio_freerunning[] = {
3910 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3911 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3912 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
3915 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3916 /* Free-Running IO CLOCKS Counter */
3917 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
3918 /* Free-Running IIO BANDWIDTH Counters */
3919 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
3920 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
3921 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
3922 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
3923 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
3924 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
3925 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
3926 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
3927 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
3928 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
3929 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
3930 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
3931 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
3932 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
3933 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
3934 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
3935 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
3936 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
3937 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
3938 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
3939 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
3940 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
3941 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
3942 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
3943 /* Free-running IIO UTILIZATION Counters */
3944 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
3945 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
3946 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
3947 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
3948 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
3949 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
3950 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
3951 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
3952 { /* end: all zeroes */ },
3955 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3956 .read_counter = uncore_msr_read_counter,
3957 .hw_config = uncore_freerunning_hw_config,
3960 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3961 &format_attr_event.attr,
3962 &format_attr_umask.attr,
3966 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3968 .attrs = skx_uncore_iio_freerunning_formats_attr,
3971 static struct intel_uncore_type skx_uncore_iio_free_running = {
3972 .name = "iio_free_running",
3975 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
3976 .freerunning = skx_iio_freerunning,
3977 .ops = &skx_uncore_iio_freerunning_ops,
3978 .event_descs = skx_uncore_iio_freerunning_events,
3979 .format_group = &skx_uncore_iio_freerunning_format_group,
3982 static struct attribute *skx_uncore_formats_attr[] = {
3983 &format_attr_event.attr,
3984 &format_attr_umask.attr,
3985 &format_attr_edge.attr,
3986 &format_attr_inv.attr,
3987 &format_attr_thresh8.attr,
3991 static const struct attribute_group skx_uncore_format_group = {
3993 .attrs = skx_uncore_formats_attr,
3996 static struct intel_uncore_type skx_uncore_irp = {
4000 .perf_ctr_bits = 48,
4001 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
4002 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
4003 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4004 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
4005 .msr_offset = SKX_IRP_MSR_OFFSET,
4006 .ops = &skx_uncore_iio_ops,
4007 .format_group = &skx_uncore_format_group,
4010 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4011 &format_attr_event.attr,
4012 &format_attr_umask.attr,
4013 &format_attr_edge.attr,
4014 &format_attr_inv.attr,
4015 &format_attr_thresh8.attr,
4016 &format_attr_occ_invert.attr,
4017 &format_attr_occ_edge_det.attr,
4018 &format_attr_filter_band0.attr,
4019 &format_attr_filter_band1.attr,
4020 &format_attr_filter_band2.attr,
4021 &format_attr_filter_band3.attr,
4025 static struct attribute_group skx_uncore_pcu_format_group = {
4027 .attrs = skx_uncore_pcu_formats_attr,
4030 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4031 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4032 .hw_config = hswep_pcu_hw_config,
4033 .get_constraint = snbep_pcu_get_constraint,
4034 .put_constraint = snbep_pcu_put_constraint,
4037 static struct intel_uncore_type skx_uncore_pcu = {
4041 .perf_ctr_bits = 48,
4042 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
4043 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
4044 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4045 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
4046 .num_shared_regs = 1,
4047 .ops = &skx_uncore_pcu_ops,
4048 .format_group = &skx_uncore_pcu_format_group,
4051 static struct intel_uncore_type *skx_msr_uncores[] = {
4055 &skx_uncore_iio_free_running,
4062 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
4063 * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
4065 #define SKX_CAPID6 0x9c
4066 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4068 static int skx_count_chabox(void)
4070 struct pci_dev *dev = NULL;
4073 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4077 pci_read_config_dword(dev, SKX_CAPID6, &val);
4078 val &= SKX_CHA_BIT_MASK;
4081 return hweight32(val);
4084 void skx_uncore_cpu_init(void)
4086 skx_uncore_chabox.num_boxes = skx_count_chabox();
4087 uncore_msr_uncores = skx_msr_uncores;
4090 static struct intel_uncore_type skx_uncore_imc = {
4094 .perf_ctr_bits = 48,
4095 .fixed_ctr_bits = 48,
4096 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4097 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4098 .event_descs = hswep_uncore_imc_events,
4099 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4100 .event_ctl = SNBEP_PCI_PMON_CTL0,
4101 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4102 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4103 .ops = &ivbep_uncore_pci_ops,
4104 .format_group = &skx_uncore_format_group,
4107 static struct attribute *skx_upi_uncore_formats_attr[] = {
4108 &format_attr_event.attr,
4109 &format_attr_umask_ext.attr,
4110 &format_attr_edge.attr,
4111 &format_attr_inv.attr,
4112 &format_attr_thresh8.attr,
4116 static const struct attribute_group skx_upi_uncore_format_group = {
4118 .attrs = skx_upi_uncore_formats_attr,
4121 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4123 struct pci_dev *pdev = box->pci_dev;
4125 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4126 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4129 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4130 .init_box = skx_upi_uncore_pci_init_box,
4131 .disable_box = snbep_uncore_pci_disable_box,
4132 .enable_box = snbep_uncore_pci_enable_box,
4133 .disable_event = snbep_uncore_pci_disable_event,
4134 .enable_event = snbep_uncore_pci_enable_event,
4135 .read_counter = snbep_uncore_pci_read_counter,
4138 static struct intel_uncore_type skx_uncore_upi = {
4142 .perf_ctr_bits = 48,
4143 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
4144 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
4145 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4146 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4147 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
4148 .ops = &skx_upi_uncore_pci_ops,
4149 .format_group = &skx_upi_uncore_format_group,
4152 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4154 struct pci_dev *pdev = box->pci_dev;
4156 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4157 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4160 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4161 .init_box = skx_m2m_uncore_pci_init_box,
4162 .disable_box = snbep_uncore_pci_disable_box,
4163 .enable_box = snbep_uncore_pci_enable_box,
4164 .disable_event = snbep_uncore_pci_disable_event,
4165 .enable_event = snbep_uncore_pci_enable_event,
4166 .read_counter = snbep_uncore_pci_read_counter,
4169 static struct intel_uncore_type skx_uncore_m2m = {
4173 .perf_ctr_bits = 48,
4174 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
4175 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
4176 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4177 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
4178 .ops = &skx_m2m_uncore_pci_ops,
4179 .format_group = &skx_uncore_format_group,
4182 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4183 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4184 EVENT_CONSTRAINT_END
4187 static struct intel_uncore_type skx_uncore_m2pcie = {
4191 .perf_ctr_bits = 48,
4192 .constraints = skx_uncore_m2pcie_constraints,
4193 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4194 .event_ctl = SNBEP_PCI_PMON_CTL0,
4195 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4196 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4197 .ops = &ivbep_uncore_pci_ops,
4198 .format_group = &skx_uncore_format_group,
4201 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4202 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4203 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4204 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4205 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4206 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4207 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4208 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4209 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4210 EVENT_CONSTRAINT_END
4213 static struct intel_uncore_type skx_uncore_m3upi = {
4217 .perf_ctr_bits = 48,
4218 .constraints = skx_uncore_m3upi_constraints,
4219 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4220 .event_ctl = SNBEP_PCI_PMON_CTL0,
4221 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4222 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4223 .ops = &ivbep_uncore_pci_ops,
4224 .format_group = &skx_uncore_format_group,
4231 SKX_PCI_UNCORE_M2PCIE,
4232 SKX_PCI_UNCORE_M3UPI,
4235 static struct intel_uncore_type *skx_pci_uncores[] = {
4236 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
4237 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
4238 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
4239 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
4240 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
/*
 * SKX uncore PCI device table. Each entry encodes the fixed PCI
 * device/function the PMON block lives at, the uncore type index and the
 * box id via UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx).
 * NOTE(review): several original per-entry comment lines (M2M/UPI/M2PCIE
 * headers) are missing from this excerpt — the gaps in the embedded
 * numbering mark them.
 */
4244 static const struct pci_device_id skx_uncore_pci_ids[] = {
4245 { /* MC0 Channel 0 */
4246 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4247 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4249 { /* MC0 Channel 1 */
4250 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4251 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4253 { /* MC0 Channel 2 */
4254 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4255 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4257 { /* MC1 Channel 0 */
4258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4259 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4261 { /* MC1 Channel 1 */
4262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4263 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4265 { /* MC1 Channel 2 */
4266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4267 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
/* 0x2066 entries: M2M boxes 0 and 1 (original comment lines omitted). */
4270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4271 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4275 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
/* 0x2058 entries: UPI links 0-2. */
4278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4279 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4283 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4287 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
/* 0x2088 entries: M2PCIe boxes 0-3. */
4290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4291 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4295 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4299 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4303 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4305 { /* M3UPI0 Link 0 */
4306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4307 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4309 { /* M3UPI0 Link 1 */
4310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4311 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4313 { /* M3UPI1 Link 2 */
4314 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4315 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4317 { /* end: all zeroes */ }
/* PCI driver stub used only to enumerate SKX uncore devices (no probe shown here). */
4321 static struct pci_driver skx_uncore_pci_driver = {
4322 .name = "skx_uncore",
4323 .id_table = skx_uncore_pci_ids,
/*
 * Set up the SKX PCI uncore support: build the pci-bus -> die mapping from
 * the UBOX device (DID 0x2014) and publish the type table and driver.
 * Return/early-exit lines fall in a numbering gap of this excerpt.
 */
4326 int skx_uncore_pci_init(void)
4328 /* need to double check pci address */
4329 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4334 uncore_pci_uncores = skx_pci_uncores;
4335 uncore_pci_driver = &skx_uncore_pci_driver;
4339 /* end of SKX uncore support */
4341 /* SNR uncore support */
/*
 * SNR UBOX (MSR-based): 48-bit general counters plus a fixed UCLK counter,
 * driven with the generic IVB-EP MSR ops/format group.
 * NOTE(review): .name/.num_* lines fall in a numbering gap of this excerpt.
 */
4343 static struct intel_uncore_type snr_uncore_ubox = {
4347 .perf_ctr_bits = 48,
4348 .fixed_ctr_bits = 48,
4349 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4350 .event_ctl = SNR_U_MSR_PMON_CTL0,
4351 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4352 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4353 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4354 .ops = &ivbep_uncore_msr_ops,
4355 .format_group = &ivbep_uncore_format_group,
/* sysfs format attributes exposed for the SNR CHA PMU. */
4358 static struct attribute *snr_uncore_cha_formats_attr[] = {
4359 &format_attr_event.attr,
4360 &format_attr_umask_ext2.attr,
4361 &format_attr_edge.attr,
4362 &format_attr_tid_en.attr,
4363 &format_attr_inv.attr,
4364 &format_attr_thresh8.attr,
4365 &format_attr_filter_tid5.attr,
4368 static const struct attribute_group snr_uncore_chabox_format_group = {
4370 .attrs = snr_uncore_cha_formats_attr,
/*
 * Program the per-box CHA filter register: the filter MSR address is
 * FILTER0 plus the box's msr_offset stride times its pmu index, and the
 * filter value is config1 masked down to the TID field.
 */
4373 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4375 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4377 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4378 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4379 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
/*
 * Enable a CHA event: write the extra (filter) register first if one was
 * configured, then write the event control MSR with the enable bit set.
 */
4385 static void snr_cha_enable_event(struct intel_uncore_box *box,
4386 struct perf_event *event)
4388 struct hw_perf_event *hwc = &event->hw;
4389 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4391 if (reg1->idx != EXTRA_REG_NONE)
4392 wrmsrl(reg1->reg, reg1->config);
4394 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
/* SNR CHA ops: SNB-EP box/event control with the CHA-specific enable/config hooks. */
4397 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4398 .init_box = ivbep_uncore_msr_init_box,
4399 .disable_box = snbep_uncore_msr_disable_box,
4400 .enable_box = snbep_uncore_msr_enable_box,
4401 .disable_event = snbep_uncore_msr_disable_event,
4402 .enable_event = snr_cha_enable_event,
4403 .read_counter = uncore_msr_read_counter,
4404 .hw_config = snr_cha_hw_config,
/*
 * SNR CHA uncore type: HSW-EP CBo MSR stride, extended umask mask, and the
 * CHA-specific ops above. Some initializer lines fall in numbering gaps.
 */
4407 static struct intel_uncore_type snr_uncore_chabox = {
4411 .perf_ctr_bits = 48,
4412 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4413 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4414 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4415 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4416 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4417 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4418 .ops = &snr_uncore_chabox_ops,
4419 .format_group = &snr_uncore_chabox_format_group,
/* sysfs format attributes for the SNR IIO PMU (9-bit thresh, ch/fc masks). */
4422 static struct attribute *snr_uncore_iio_formats_attr[] = {
4423 &format_attr_event.attr,
4424 &format_attr_umask.attr,
4425 &format_attr_edge.attr,
4426 &format_attr_inv.attr,
4427 &format_attr_thresh9.attr,
4428 &format_attr_ch_mask2.attr,
4429 &format_attr_fc_mask2.attr,
4433 static const struct attribute_group snr_uncore_iio_format_group = {
4435 .attrs = snr_uncore_iio_formats_attr,
/* Hide IIO die-mapping attrs that don't apply; on SNR only pmu_idx 1 may map root bus 0. */
4439 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4441 /* Root bus 0x00 is valid only for pmu_idx = 1. */
4442 return pmu_iio_mapping_visible(kobj, attr, die, 1);
4445 static struct attribute_group snr_iio_mapping_group = {
4446 .is_visible = snr_iio_mapping_visible,
/* attr_update list hooked into the IIO uncore type below. */
4449 static const struct attribute_group *snr_iio_attr_update[] = {
4450 &snr_iio_mapping_group,
/*
 * Discover the IIO stack <-> PCI bus topology from hardware: walk every
 * Mesh2IIO mmap device, read its SAD_CONTROL_CFG register, translate the
 * SAD stack id into PMON notation via @sad_pmon_mapping, and record the
 * bus number and PCI segment per die in type->topology.  On failure the
 * partially-built topology is freed.  Error-path/loop-exit lines fall in
 * numbering gaps of this excerpt.
 */
4454 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4457 int die, stack_id, ret = -EPERM;
4458 struct pci_dev *dev = NULL;
4460 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4462 if (!type->topology)
4465 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4466 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4468 ret = pcibios_err_to_errno(ret);
4472 die = uncore_pcibus_to_dieid(dev->bus);
4473 stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4474 if (die < 0 || stack_id >= type->num_boxes) {
4479 /* Convert stack id from SAD_CONTROL to PMON notation. */
4480 stack_id = sad_pmon_mapping[stack_id];
/* Stash the root bus number in the per-die configuration, indexed by PMON stack id. */
4482 ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4483 type->topology[die].segment = pci_domain_nr(dev->bus);
4487 kfree(type->topology);
4488 type->topology = NULL;
/*
 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 * notation; only fragments of the enum/array are visible in this excerpt.
 */
4499 SNR_CBDMA_DMI_PMON_ID,
4502 SNR_PCIE_GEN3_PMON_ID
4505 static u8 snr_sad_pmon_mapping[] = {
4506 SNR_CBDMA_DMI_PMON_ID,
4507 SNR_PCIE_GEN3_PMON_ID,
/* Thin wrappers binding the generic topology/mapping helpers to SNR's tables. */
4513 static int snr_iio_get_topology(struct intel_uncore_type *type)
4515 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4518 static int snr_iio_set_mapping(struct intel_uncore_type *type)
4520 return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4523 static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
4525 pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
/*
 * SNR IIO uncore type: SNR-specific MSR layout plus the topology/mapping
 * callbacks defined above.  (.name/.num_* lines fall in numbering gaps.)
 */
4528 static struct intel_uncore_type snr_uncore_iio = {
4532 .perf_ctr_bits = 48,
4533 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4534 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4535 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4536 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4537 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4538 .msr_offset = SNR_IIO_MSR_OFFSET,
4539 .ops = &ivbep_uncore_msr_ops,
4540 .format_group = &snr_uncore_iio_format_group,
4541 .attr_update = snr_iio_attr_update,
4542 .get_topology = snr_iio_get_topology,
4543 .set_mapping = snr_iio_set_mapping,
4544 .cleanup_mapping = snr_iio_cleanup_mapping,
/* SNR IRP uncore type: plain IVB-EP MSR ops on the IRP0 register block. */
4547 static struct intel_uncore_type snr_uncore_irp = {
4551 .perf_ctr_bits = 48,
4552 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4553 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4554 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4555 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4556 .msr_offset = SNR_IRP_MSR_OFFSET,
4557 .ops = &ivbep_uncore_msr_ops,
4558 .format_group = &ivbep_uncore_format_group,
/* SNR M2PCIe uncore type (MSR-based, unlike the PCI-based SKX variant). */
4561 static struct intel_uncore_type snr_uncore_m2pcie = {
4565 .perf_ctr_bits = 48,
4566 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4567 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4568 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4569 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4570 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4571 .ops = &ivbep_uncore_msr_ops,
4572 .format_group = &ivbep_uncore_format_group,
/*
 * PCU hw_config: events 0xb-0xe are occupancy-band events that need the
 * shared filter MSR; the band select (reg1->idx) and the 8-bit band value
 * (taken from config1, shifted by idx) are recorded for later programming.
 */
4575 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4577 struct hw_perf_event *hwc = &event->hw;
4578 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4579 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4581 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4582 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4583 reg1->idx = ev_sel - 0xb;
4584 reg1->config = event->attr.config1 & (0xff << reg1->idx);
/* PCU ops: generic IVB-EP MSR ops plus filter config and shared-reg constraints. */
4589 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4590 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4591 .hw_config = snr_pcu_hw_config,
4592 .get_constraint = snbep_pcu_get_constraint,
4593 .put_constraint = snbep_pcu_put_constraint,
/* SNR PCU uncore type: one shared (filter) register arbitrated across events. */
4596 static struct intel_uncore_type snr_uncore_pcu = {
4600 .perf_ctr_bits = 48,
4601 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4602 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4603 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4604 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4605 .num_shared_regs = 1,
4606 .ops = &snr_uncore_pcu_ops,
4607 .format_group = &skx_uncore_pcu_format_group,
/* Free-running counter groups exposed by the SNR IIO stacks (ioclk + bw-in). */
4610 enum perf_uncore_snr_iio_freerunning_type_id {
4614 SNR_IIO_FREERUNNING_TYPE_MAX,
/* { first counter MSR, box offset, counter offset, num counters, bits }. */
4617 static struct freerunning_counters snr_iio_freerunning[] = {
4618 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4619 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
/* Event aliases for the free-running counters; bw counts scale to MiB. */
4622 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4623 /* Free-Running IIO CLOCKS Counter */
4624 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4625 /* Free-Running IIO BANDWIDTH IN Counters */
4626 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4627 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4628 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4629 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4630 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4631 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4632 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4633 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4634 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4635 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4636 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4637 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4638 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4639 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4640 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4641 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4642 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4643 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4644 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4645 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4646 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4647 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4648 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4649 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4650 { /* end: all zeroes */ },
/* Pseudo uncore type wrapping the free-running IIO counters (reuses SKX ops). */
4653 static struct intel_uncore_type snr_uncore_iio_free_running = {
4654 .name = "iio_free_running",
4657 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4658 .freerunning = snr_iio_freerunning,
4659 .ops = &skx_uncore_iio_freerunning_ops,
4660 .event_descs = snr_uncore_iio_freerunning_events,
4661 .format_group = &skx_uncore_iio_freerunning_format_group,
/* MSR-based SNR uncore types registered at CPU init (list partially elided). */
4664 static struct intel_uncore_type *snr_msr_uncores[] = {
4671 &snr_uncore_iio_free_running,
4675 void snr_uncore_cpu_init(void)
4677 uncore_msr_uncores = snr_msr_uncores;
/*
 * SNR M2M init: the control registers use an 8-byte stride (flag set on the
 * box), then the box is reset/frozen via the IVB-EP init value.
 */
4680 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4682 struct pci_dev *pdev = box->pci_dev;
4683 int box_ctl = uncore_pci_box_ctl(box);
4685 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4686 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
/* M2M PCI ops: SNB-EP box/event control with the SNR-specific init above. */
4689 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4690 .init_box = snr_m2m_uncore_pci_init_box,
4691 .disable_box = snbep_uncore_pci_disable_box,
4692 .enable_box = snbep_uncore_pci_enable_box,
4693 .disable_event = snbep_uncore_pci_disable_event,
4694 .enable_event = snbep_uncore_pci_enable_event,
4695 .read_counter = snbep_uncore_pci_read_counter,
/* sysfs format attributes for the SNR M2M PMU (umask_ext3 variant). */
4698 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4699 &format_attr_event.attr,
4700 &format_attr_umask_ext3.attr,
4701 &format_attr_edge.attr,
4702 &format_attr_inv.attr,
4703 &format_attr_thresh8.attr,
4707 static const struct attribute_group snr_m2m_uncore_format_group = {
4709 .attrs = snr_m2m_uncore_formats_attr,
/* SNR M2M uncore type (PCI PMON with extended umask; shared with ICX below). */
4712 static struct intel_uncore_type snr_uncore_m2m = {
4716 .perf_ctr_bits = 48,
4717 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4718 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4719 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4720 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4721 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4722 .ops = &snr_m2m_uncore_pci_ops,
4723 .format_group = &snr_m2m_uncore_format_group,
/*
 * Enable a PCIe3 event: the 64-bit event config must be written as two
 * 32-bit PCI config writes (low dword with the enable bit, then high dword).
 */
4726 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4728 struct pci_dev *pdev = box->pci_dev;
4729 struct hw_perf_event *hwc = &event->hw;
4731 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4732 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
/* PCIe3 ops: M2M-style init/box control with the 64-bit enable_event above. */
4735 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4736 .init_box = snr_m2m_uncore_pci_init_box,
4737 .disable_box = snbep_uncore_pci_disable_box,
4738 .enable_box = snbep_uncore_pci_enable_box,
4739 .disable_event = snbep_uncore_pci_disable_event,
4740 .enable_event = snr_uncore_pci_enable_event,
4741 .read_counter = snbep_uncore_pci_read_counter,
/* SNR PCIe gen3 uncore type; reuses the SKX IIO event masks/format group. */
4744 static struct intel_uncore_type snr_uncore_pcie3 = {
4748 .perf_ctr_bits = 48,
4749 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4750 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4751 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4752 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4753 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4754 .ops = &snr_pcie3_uncore_pci_ops,
4755 .format_group = &skx_uncore_iio_format_group,
/* Tail of the SNR PCI uncore type-id enum (header not in this excerpt). */
4760 SNR_PCI_UNCORE_PCIE3,
4763 static struct intel_uncore_type *snr_pci_uncores[] = {
4764 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4765 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
/* M2M is matched by the main PCI driver... */
4769 static const struct pci_device_id snr_uncore_pci_ids[] = {
4771 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4772 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4774 { /* end: all zeroes */ }
4777 static struct pci_driver snr_uncore_pci_driver = {
4778 .name = "snr_uncore",
4779 .id_table = snr_uncore_pci_ids,
/* ...while PCIe3 sits on a root bus and is handled by a sub-driver. */
4782 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4784 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4785 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4787 { /* end: all zeroes */ }
4790 static struct pci_driver snr_uncore_pci_sub_driver = {
4791 .name = "snr_uncore_sub",
4792 .id_table = snr_uncore_pci_sub_ids,
/* Build the SNR bus->die map (UBOX DID 0x3460) and publish tables/drivers. */
4795 int snr_uncore_pci_init(void)
4798 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4799 SKX_GIDNIDMAP, true);
4804 uncore_pci_uncores = snr_pci_uncores;
4805 uncore_pci_driver = &snr_uncore_pci_driver;
4806 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
/* PCI device id of the SNR memory controller used to locate IMC MMIO space. */
4810 #define SNR_MC_DEVICE_ID 0x3451
/*
 * Find the MC PCI device belonging to die @id by walking all devices with
 * @device id and comparing their die (loop/return lines elided in excerpt).
 */
4812 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
4814 struct pci_dev *mc_dev = NULL;
4818 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
4821 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
/*
 * Map the IMC PMON MMIO region: compose the physical address from the
 * MMIO base (bits shifted left 23) and the per-controller MEM offset
 * (bits shifted left 12) read out of the MC device's config space, then
 * ioremap mmio_map_size bytes of it.
 */
4828 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
4829 unsigned int box_ctl, int mem_offset,
4830 unsigned int device)
4832 struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
4833 struct intel_uncore_type *type = box->pmu->type;
4834 resource_size_t addr;
4840 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4841 addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4843 pci_read_config_dword(pdev, mem_offset, &pci_dword);
4844 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4848 box->io_addr = ioremap(addr, type->mmio_map_size);
4849 if (!box->io_addr) {
4850 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
/* Map the region, then reset/freeze the box via its MMIO control register. */
4857 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4858 unsigned int box_ctl, int mem_offset,
4859 unsigned int device)
4861 if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
4862 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr)
/* SNR flavor of the shared init helper: MEM0 offset, SNR MC device id (elided arg). */
4865 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4867 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4868 SNR_IMC_MMIO_MEM0_OFFSET,
/* Freeze the box by setting FRZ in the MMIO box control register. */
4872 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4879 config = readl(box->io_addr);
4880 config |= SNBEP_PMON_BOX_CTL_FRZ;
4881 writel(config, box->io_addr);
/* Unfreeze the box by clearing FRZ. */
4884 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4891 config = readl(box->io_addr);
4892 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4893 writel(config, box->io_addr);
/* Write the event control register (with EN) after bounds-checking the offset. */
4896 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4897 struct perf_event *event)
4899 struct hw_perf_event *hwc = &event->hw;
4904 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4907 writel(hwc->config | SNBEP_PMON_CTL_EN,
4908 box->io_addr + hwc->config_base);
/* Write back the config with EN clear to stop the event. */
4911 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4912 struct perf_event *event)
4914 struct hw_perf_event *hwc = &event->hw;
4919 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4922 writel(hwc->config, box->io_addr + hwc->config_base);
/* MMIO-based ops table for the SNR IMC PMON. */
4925 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4926 .init_box = snr_uncore_mmio_init_box,
4927 .exit_box = uncore_mmio_exit_box,
4928 .disable_box = snr_uncore_mmio_disable_box,
4929 .enable_box = snr_uncore_mmio_enable_box,
4930 .disable_event = snr_uncore_mmio_disable_event,
4931 .enable_event = snr_uncore_mmio_enable_event,
4932 .read_counter = uncore_mmio_read_counter,
/* Named IMC events: clockticks and CAS read/write counts (scaled to MiB). */
4935 static struct uncore_event_desc snr_uncore_imc_events[] = {
4936 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
4937 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4938 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4939 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4940 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4941 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4942 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4943 { /* end: all zeroes */ },
/* SNR IMC uncore type: MMIO PMON with general + fixed 48-bit counters. */
4946 static struct intel_uncore_type snr_uncore_imc = {
4950 .perf_ctr_bits = 48,
4951 .fixed_ctr_bits = 48,
4952 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4953 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4954 .event_descs = snr_uncore_imc_events,
4955 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4956 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4957 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4958 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4959 .mmio_offset = SNR_IMC_MMIO_OFFSET,
4960 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4961 .ops = &snr_uncore_mmio_ops,
4962 .format_group = &skx_uncore_format_group,
/* Free-running IMC counter groups: DCLK plus DDR read/write byte counts. */
4965 enum perf_uncore_snr_imc_freerunning_type_id {
4969 SNR_IMC_FREERUNNING_TYPE_MAX,
4972 static struct freerunning_counters snr_imc_freerunning[] = {
4973 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
4974 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
/* Event aliases; read/write scale 6.1e-5 converts 64-byte lines to MiB. */
4977 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4978 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4980 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4981 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
4982 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4983 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4984 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
4985 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4986 { /* end: all zeroes */ },
/* Free-running counters need only map/read — no enable/disable hooks. */
4989 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4990 .init_box = snr_uncore_mmio_init_box,
4991 .exit_box = uncore_mmio_exit_box,
4992 .read_counter = uncore_mmio_read_counter,
4993 .hw_config = uncore_freerunning_hw_config,
4996 static struct intel_uncore_type snr_uncore_imc_free_running = {
4997 .name = "imc_free_running",
5000 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
5001 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5002 .freerunning = snr_imc_freerunning,
5003 .ops = &snr_uncore_imc_freerunning_ops,
5004 .event_descs = snr_uncore_imc_freerunning_events,
5005 .format_group = &skx_uncore_iio_freerunning_format_group,
/* MMIO-based SNR uncore types registered at mmio init. */
5008 static struct intel_uncore_type *snr_mmio_uncores[] = {
5010 &snr_uncore_imc_free_running,
5014 void snr_uncore_mmio_init(void)
5016 uncore_mmio_uncores = snr_mmio_uncores;
5019 /* end of SNR uncore support */
5021 /* ICX uncore support */
/* Per-CHA MSR offsets on ICX — the stride is irregular, hence a lookup table. */
5023 static unsigned icx_cha_msr_offsets[] = {
5024 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
5025 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
5026 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
5027 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
5028 0x1c, 0x2a, 0x38, 0x46,
/*
 * ICX CHA hw_config: when TID filtering is enabled in the event config,
 * program the per-box filter MSR (offset table above) with the TID bits
 * from config1.  The tid_en check/branch lines are elided in this excerpt.
 */
5031 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5033 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5034 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5037 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5038 icx_cha_msr_offsets[box->pmu->pmu_idx];
5039 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
/* ICX CHA ops: reuses the SNR enable_event with the ICX-specific hw_config. */
5046 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5047 .init_box = ivbep_uncore_msr_init_box,
5048 .disable_box = snbep_uncore_msr_disable_box,
5049 .enable_box = snbep_uncore_msr_enable_box,
5050 .disable_event = snbep_uncore_msr_disable_event,
5051 .enable_event = snr_cha_enable_event,
5052 .read_counter = uncore_msr_read_counter,
5053 .hw_config = icx_cha_hw_config,
/*
 * ICX CHA uncore type: per-box offset table (msr_offsets) instead of a
 * fixed stride; num_boxes is filled in at runtime by icx_uncore_cpu_init().
 */
5056 static struct intel_uncore_type icx_uncore_chabox = {
5059 .perf_ctr_bits = 48,
5060 .event_ctl = ICX_C34_MSR_PMON_CTL0,
5061 .perf_ctr = ICX_C34_MSR_PMON_CTR0,
5062 .box_ctl = ICX_C34_MSR_PMON_BOX_CTL,
5063 .msr_offsets = icx_cha_msr_offsets,
5064 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5065 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
5066 .constraints = skx_uncore_chabox_constraints,
5067 .ops = &icx_uncore_chabox_ops,
5068 .format_group = &snr_uncore_chabox_format_group,
/* Per-box MSR offsets shared by the ICX IIO/IRP/M2PCIe PMUs (6 boxes). */
5071 static unsigned icx_msr_offsets[] = {
5072 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
/* ICX IIO constraints: 0x02/0x03/0x83 on ctrs 0-1; 0xc0/0xc5 on ctrs 2-3. */
5075 static struct event_constraint icx_uncore_iio_constraints[] = {
5076 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5077 UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5078 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5079 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5080 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5081 EVENT_CONSTRAINT_END
/* On ICX, root bus 0x00 belongs to pmu_idx 5 (cf. pmu_idx 1 on SNR). */
5085 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5087 /* Root bus 0x00 is valid only for pmu_idx = 5. */
5088 return pmu_iio_mapping_visible(kobj, attr, die, 5);
5091 static struct attribute_group icx_iio_mapping_group = {
5092 .is_visible = icx_iio_mapping_visible,
5095 static const struct attribute_group *icx_iio_attr_update[] = {
5096 &icx_iio_mapping_group,
/*
 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 * notation; only fragments of the enum/array survive in this excerpt.
 */
5109 ICX_CBDMA_DMI_PMON_ID
5112 static u8 icx_sad_pmon_mapping[] = {
5113 ICX_CBDMA_DMI_PMON_ID,
/* Thin wrappers binding the generic topology/mapping helpers to ICX's tables. */
5121 static int icx_iio_get_topology(struct intel_uncore_type *type)
5123 return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5126 static int icx_iio_set_mapping(struct intel_uncore_type *type)
5128 return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5131 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5133 pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
/* ICX IIO uncore type, reusing the SKX IIO ops and SNR format group. */
5136 static struct intel_uncore_type icx_uncore_iio = {
5140 .perf_ctr_bits = 48,
5141 .event_ctl = ICX_IIO_MSR_PMON_CTL0,
5142 .perf_ctr = ICX_IIO_MSR_PMON_CTR0,
5143 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5144 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5145 .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL,
5146 .msr_offsets = icx_msr_offsets,
5147 .constraints = icx_uncore_iio_constraints,
5148 .ops = &skx_uncore_iio_ops,
5149 .format_group = &snr_uncore_iio_format_group,
5150 .attr_update = icx_iio_attr_update,
5151 .get_topology = icx_iio_get_topology,
5152 .set_mapping = icx_iio_set_mapping,
5153 .cleanup_mapping = icx_iio_cleanup_mapping,
/* ICX IRP uncore type: generic IVB-EP MSR ops over the shared offset table. */
5156 static struct intel_uncore_type icx_uncore_irp = {
5160 .perf_ctr_bits = 48,
5161 .event_ctl = ICX_IRP0_MSR_PMON_CTL0,
5162 .perf_ctr = ICX_IRP0_MSR_PMON_CTR0,
5163 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5164 .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL,
5165 .msr_offsets = icx_msr_offsets,
5166 .ops = &ivbep_uncore_msr_ops,
5167 .format_group = &ivbep_uncore_format_group,
/* ICX M2PCIe constraints: 0x14/0x23/0x2d limited to counters 0-1. */
5170 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5171 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5172 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5173 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5174 EVENT_CONSTRAINT_END
/* ICX M2PCIe uncore type (MSR-based, shared offset table). */
5177 static struct intel_uncore_type icx_uncore_m2pcie = {
5181 .perf_ctr_bits = 48,
5182 .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0,
5183 .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0,
5184 .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL,
5185 .msr_offsets = icx_msr_offsets,
5186 .constraints = icx_uncore_m2pcie_constraints,
5187 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5188 .ops = &ivbep_uncore_msr_ops,
5189 .format_group = &ivbep_uncore_format_group,
/* ICX free-running IIO counters: ioclk and bw-in groups. */
5192 enum perf_uncore_icx_iio_freerunning_type_id {
5196 ICX_IIO_FREERUNNING_TYPE_MAX,
/* Irregular per-box base offsets — clk and bw use different spacings. */
5199 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5200 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5203 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5204 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
/* { base MSR, box offset, ctr offset, num ctrs, bits, per-box offset table }. */
5207 static struct freerunning_counters icx_iio_freerunning[] = {
5208 [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5209 [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
/* Event aliases for the free-running counters; bw counts scale to MiB. */
5212 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5213 /* Free-Running IIO CLOCKS Counter */
5214 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
5215 /* Free-Running IIO BANDWIDTH IN Counters */
5216 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
5217 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
5218 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
5219 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
5220 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
5221 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
5222 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
5223 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
5224 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
5225 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
5226 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
5227 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
5228 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
5229 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
5230 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
5231 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
5232 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
5233 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
5234 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
5235 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
5236 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
5237 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
5238 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
5239 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
5240 { /* end: all zeroes */ },
/* Pseudo uncore type wrapping the ICX free-running IIO counters. */
5243 static struct intel_uncore_type icx_uncore_iio_free_running = {
5244 .name = "iio_free_running",
5247 .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
5248 .freerunning = icx_iio_freerunning,
5249 .ops = &skx_uncore_iio_freerunning_ops,
5250 .event_descs = icx_uncore_iio_freerunning_events,
5251 .format_group = &skx_uncore_iio_freerunning_format_group,
/* MSR-based ICX uncore types (list partially elided in this excerpt). */
5254 static struct intel_uncore_type *icx_msr_uncores[] = {
5261 &icx_uncore_iio_free_running,
/*
 * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
 * registers which located at Device 30, Function 3
 */
5269 #define ICX_CAPID6 0x9c
5270 #define ICX_CAPID7 0xa0
/* Count enabled CHA boxes: popcount of the 64-bit CAPID6/CAPID7 pair. */
5272 static u64 icx_count_chabox(void)
5274 struct pci_dev *dev = NULL;
5277 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5281 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5282 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5285 return hweight64(caps);
/* Fill in the runtime CHA count (bounded by the offset table) and register. */
5288 void icx_uncore_cpu_init(void)
5290 u64 num_boxes = icx_count_chabox();
5292 if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5294 icx_uncore_chabox.num_boxes = num_boxes;
5295 uncore_msr_uncores = icx_msr_uncores;
/* ICX M2M: same register layout and ops as the SNR M2M uncore. */
5298 static struct intel_uncore_type icx_uncore_m2m = {
5302 .perf_ctr_bits = 48,
5303 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
5304 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
5305 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5306 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
5307 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
5308 .ops = &snr_m2m_uncore_pci_ops,
5309 .format_group = &snr_m2m_uncore_format_group,
/* sysfs format attributes for the ICX UPI PMU (umask_ext4 variant). */
5312 static struct attribute *icx_upi_uncore_formats_attr[] = {
5313 &format_attr_event.attr,
5314 &format_attr_umask_ext4.attr,
5315 &format_attr_edge.attr,
5316 &format_attr_inv.attr,
5317 &format_attr_thresh8.attr,
5321 static const struct attribute_group icx_upi_uncore_format_group = {
5323 .attrs = icx_upi_uncore_formats_attr,
/* ICX UPI uncore type (PCI PMON, reuses the SKX UPI PCI ops). */
5326 static struct intel_uncore_type icx_uncore_upi = {
5330 .perf_ctr_bits = 48,
5331 .perf_ctr = ICX_UPI_PCI_PMON_CTR0,
5332 .event_ctl = ICX_UPI_PCI_PMON_CTL0,
5333 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5334 .event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5335 .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
5336 .ops = &skx_upi_uncore_pci_ops,
5337 .format_group = &icx_upi_uncore_format_group,
/*
 * ICX M3UPI constraints: 0x1c-0x1f restricted to counter 0 (mask 0x1);
 * the 0x40/0x4e-0x50 group may use counters 0-2 (mask 0x7).
 */
5340 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5341 UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5342 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5343 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5344 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5345 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5346 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5347 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5348 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5349 EVENT_CONSTRAINT_END
/* ICX M3UPI uncore type (PCI PMON, IVB-EP PCI ops). */
5352 static struct intel_uncore_type icx_uncore_m3upi = {
5356 .perf_ctr_bits = 48,
5357 .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
5358 .event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
5359 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5360 .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
5361 .constraints = icx_uncore_m3upi_constraints,
5362 .ops = &ivbep_uncore_pci_ops,
5363 .format_group = &skx_uncore_format_group,
/* Tail of the ICX PCI uncore type-id enum (header not in this excerpt). */
5369 ICX_PCI_UNCORE_M3UPI,
5372 static struct intel_uncore_type *icx_pci_uncores[] = {
5373 [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m,
5374 [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi,
5375 [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi,
/*
 * ICX uncore PCI device table; 0x344a = M2M (boxes 0-3 at dev 12-15),
 * 0x3441 = UPI (links 0-2).  Per-entry comment lines for those groups
 * are missing from this excerpt (numbering gaps).
 */
5379 static const struct pci_device_id icx_uncore_pci_ids[] = {
5381 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5382 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5385 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5386 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5389 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5390 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5393 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5394 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5397 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5398 .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5402 .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5406 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5408 { /* M3UPI Link 0 */
5409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5410 .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5412 { /* M3UPI Link 1 */
5413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5414 .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5416 { /* M3UPI Link 2 */
5417 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5418 .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5420 { /* end: all zeroes */ }
5423 static struct pci_driver icx_uncore_pci_driver = {
5424 .name = "icx_uncore",
5425 .id_table = icx_uncore_pci_ids,
5428 int icx_uncore_pci_init(void)
5431 int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5432 SKX_GIDNIDMAP, true);
5437 uncore_pci_uncores = icx_pci_uncores;
5438 uncore_pci_driver = &icx_uncore_pci_driver;
5442 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5444 unsigned int box_ctl = box->pmu->type->box_ctl +
5445 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5446 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5447 SNR_IMC_MMIO_MEM0_OFFSET;
5449 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5453 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5454 .init_box = icx_uncore_imc_init_box,
5455 .exit_box = uncore_mmio_exit_box,
5456 .disable_box = snr_uncore_mmio_disable_box,
5457 .enable_box = snr_uncore_mmio_enable_box,
5458 .disable_event = snr_uncore_mmio_disable_event,
5459 .enable_event = snr_uncore_mmio_enable_event,
5460 .read_counter = uncore_mmio_read_counter,
5463 static struct intel_uncore_type icx_uncore_imc = {
5467 .perf_ctr_bits = 48,
5468 .fixed_ctr_bits = 48,
5469 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5470 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5471 .event_descs = hswep_uncore_imc_events,
5472 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
5473 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
5474 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5475 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
5476 .mmio_offset = SNR_IMC_MMIO_OFFSET,
5477 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5478 .ops = &icx_uncore_mmio_ops,
5479 .format_group = &skx_uncore_format_group,
5482 enum perf_uncore_icx_imc_freerunning_type_id {
5487 ICX_IMC_FREERUNNING_TYPE_MAX,
5490 static struct freerunning_counters icx_imc_freerunning[] = {
5491 [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5492 [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5493 [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
5496 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5497 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5499 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5500 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5501 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5502 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5503 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5504 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5506 INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
5507 INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
5508 INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
5509 INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
5510 INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
5511 INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5512 { /* end: all zeroes */ },
5515 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5517 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5518 SNR_IMC_MMIO_MEM0_OFFSET;
5520 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5521 mem_offset, SNR_MC_DEVICE_ID);
5524 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5525 .init_box = icx_uncore_imc_freerunning_init_box,
5526 .exit_box = uncore_mmio_exit_box,
5527 .read_counter = uncore_mmio_read_counter,
5528 .hw_config = uncore_freerunning_hw_config,
5531 static struct intel_uncore_type icx_uncore_imc_free_running = {
5532 .name = "imc_free_running",
5535 .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
5536 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5537 .freerunning = icx_imc_freerunning,
5538 .ops = &icx_uncore_imc_freerunning_ops,
5539 .event_descs = icx_uncore_imc_freerunning_events,
5540 .format_group = &skx_uncore_iio_freerunning_format_group,
5543 static struct intel_uncore_type *icx_mmio_uncores[] = {
5545 &icx_uncore_imc_free_running,
5549 void icx_uncore_mmio_init(void)
5551 uncore_mmio_uncores = icx_mmio_uncores;
5554 /* end of ICX uncore support */
5556 /* SPR uncore support */
5558 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5559 struct perf_event *event)
5561 struct hw_perf_event *hwc = &event->hw;
5562 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5564 if (reg1->idx != EXTRA_REG_NONE)
5565 wrmsrl(reg1->reg, reg1->config);
5567 wrmsrl(hwc->config_base, hwc->config);
5570 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5571 struct perf_event *event)
5573 struct hw_perf_event *hwc = &event->hw;
5574 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5576 if (reg1->idx != EXTRA_REG_NONE)
5577 wrmsrl(reg1->reg, 0);
5579 wrmsrl(hwc->config_base, 0);
5582 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5584 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5585 bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5586 struct intel_uncore_type *type = box->pmu->type;
5589 reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5590 HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
5591 reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5598 static struct intel_uncore_ops spr_uncore_chabox_ops = {
5599 .init_box = intel_generic_uncore_msr_init_box,
5600 .disable_box = intel_generic_uncore_msr_disable_box,
5601 .enable_box = intel_generic_uncore_msr_enable_box,
5602 .disable_event = spr_uncore_msr_disable_event,
5603 .enable_event = spr_uncore_msr_enable_event,
5604 .read_counter = uncore_msr_read_counter,
5605 .hw_config = spr_cha_hw_config,
5606 .get_constraint = uncore_get_constraint,
5607 .put_constraint = uncore_put_constraint,
5610 static struct attribute *spr_uncore_cha_formats_attr[] = {
5611 &format_attr_event.attr,
5612 &format_attr_umask_ext4.attr,
5613 &format_attr_tid_en2.attr,
5614 &format_attr_edge.attr,
5615 &format_attr_inv.attr,
5616 &format_attr_thresh8.attr,
5617 &format_attr_filter_tid5.attr,
5620 static const struct attribute_group spr_uncore_chabox_format_group = {
5622 .attrs = spr_uncore_cha_formats_attr,
5625 static ssize_t alias_show(struct device *dev,
5626 struct device_attribute *attr,
5629 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5630 char pmu_name[UNCORE_PMU_NAME_LEN];
5632 uncore_get_alias_name(pmu_name, pmu);
5633 return sysfs_emit(buf, "%s\n", pmu_name);
5636 static DEVICE_ATTR_RO(alias);
5638 static struct attribute *uncore_alias_attrs[] = {
5639 &dev_attr_alias.attr,
5643 ATTRIBUTE_GROUPS(uncore_alias);
5645 static struct intel_uncore_type spr_uncore_chabox = {
5647 .event_mask = SPR_CHA_PMON_EVENT_MASK,
5648 .event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
5649 .num_shared_regs = 1,
5650 .ops = &spr_uncore_chabox_ops,
5651 .format_group = &spr_uncore_chabox_format_group,
5652 .attr_update = uncore_alias_groups,
5655 static struct intel_uncore_type spr_uncore_iio = {
5657 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5658 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5659 .format_group = &snr_uncore_iio_format_group,
5660 .attr_update = uncore_alias_groups,
5663 static struct attribute *spr_uncore_raw_formats_attr[] = {
5664 &format_attr_event.attr,
5665 &format_attr_umask_ext4.attr,
5666 &format_attr_edge.attr,
5667 &format_attr_inv.attr,
5668 &format_attr_thresh8.attr,
5672 static const struct attribute_group spr_uncore_raw_format_group = {
5674 .attrs = spr_uncore_raw_formats_attr,
5677 #define SPR_UNCORE_COMMON_FORMAT() \
5678 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
5679 .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, \
5680 .format_group = &spr_uncore_raw_format_group, \
5681 .attr_update = uncore_alias_groups
5683 static struct intel_uncore_type spr_uncore_irp = {
5684 SPR_UNCORE_COMMON_FORMAT(),
5689 static struct intel_uncore_type spr_uncore_m2pcie = {
5690 SPR_UNCORE_COMMON_FORMAT(),
5694 static struct intel_uncore_type spr_uncore_pcu = {
5696 .attr_update = uncore_alias_groups,
5699 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5700 struct perf_event *event)
5702 struct hw_perf_event *hwc = &event->hw;
5707 if (uncore_pmc_fixed(hwc->idx))
5708 writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
5710 writel(hwc->config, box->io_addr + hwc->config_base);
5713 static struct intel_uncore_ops spr_uncore_mmio_ops = {
5714 .init_box = intel_generic_uncore_mmio_init_box,
5715 .exit_box = uncore_mmio_exit_box,
5716 .disable_box = intel_generic_uncore_mmio_disable_box,
5717 .enable_box = intel_generic_uncore_mmio_enable_box,
5718 .disable_event = intel_generic_uncore_mmio_disable_event,
5719 .enable_event = spr_uncore_mmio_enable_event,
5720 .read_counter = uncore_mmio_read_counter,
5723 static struct intel_uncore_type spr_uncore_imc = {
5724 SPR_UNCORE_COMMON_FORMAT(),
5726 .fixed_ctr_bits = 48,
5727 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5728 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5729 .ops = &spr_uncore_mmio_ops,
5732 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
5733 struct perf_event *event)
5735 struct pci_dev *pdev = box->pci_dev;
5736 struct hw_perf_event *hwc = &event->hw;
5738 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
5739 pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
5742 static struct intel_uncore_ops spr_uncore_pci_ops = {
5743 .init_box = intel_generic_uncore_pci_init_box,
5744 .disable_box = intel_generic_uncore_pci_disable_box,
5745 .enable_box = intel_generic_uncore_pci_enable_box,
5746 .disable_event = intel_generic_uncore_pci_disable_event,
5747 .enable_event = spr_uncore_pci_enable_event,
5748 .read_counter = intel_generic_uncore_pci_read_counter,
5751 #define SPR_UNCORE_PCI_COMMON_FORMAT() \
5752 SPR_UNCORE_COMMON_FORMAT(), \
5753 .ops = &spr_uncore_pci_ops
5755 static struct intel_uncore_type spr_uncore_m2m = {
5756 SPR_UNCORE_PCI_COMMON_FORMAT(),
5760 static struct intel_uncore_type spr_uncore_upi = {
5761 SPR_UNCORE_PCI_COMMON_FORMAT(),
5765 static struct intel_uncore_type spr_uncore_m3upi = {
5766 SPR_UNCORE_PCI_COMMON_FORMAT(),
5770 static struct intel_uncore_type spr_uncore_mdf = {
5771 SPR_UNCORE_COMMON_FORMAT(),
5775 #define UNCORE_SPR_NUM_UNCORE_TYPES 12
5776 #define UNCORE_SPR_IIO 1
5777 #define UNCORE_SPR_IMC 6
5779 static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
5794 enum perf_uncore_spr_iio_freerunning_type_id {
5799 SPR_IIO_FREERUNNING_TYPE_MAX,
5802 static struct freerunning_counters spr_iio_freerunning[] = {
5803 [SPR_IIO_MSR_IOCLK] = { 0x340e, 0x1, 0x10, 1, 48 },
5804 [SPR_IIO_MSR_BW_IN] = { 0x3800, 0x1, 0x10, 8, 48 },
5805 [SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
5808 static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
5809 /* Free-Running IIO CLOCKS Counter */
5810 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
5811 /* Free-Running IIO BANDWIDTH IN Counters */
5812 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
5813 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
5814 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
5815 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
5816 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
5817 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
5818 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
5819 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
5820 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
5821 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
5822 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
5823 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
5824 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
5825 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
5826 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
5827 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
5828 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
5829 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
5830 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
5831 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
5832 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
5833 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
5834 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
5835 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
5836 /* Free-Running IIO BANDWIDTH OUT Counters */
5837 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
5838 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
5839 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
5840 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
5841 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
5842 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
5843 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
5844 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
5845 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
5846 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
5847 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
5848 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
5849 INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
5850 INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
5851 INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
5852 INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
5853 INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
5854 INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
5855 INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
5856 INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
5857 INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
5858 INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
5859 INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
5860 INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
5861 { /* end: all zeroes */ },
5864 static struct intel_uncore_type spr_uncore_iio_free_running = {
5865 .name = "iio_free_running",
5867 .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
5868 .freerunning = spr_iio_freerunning,
5869 .ops = &skx_uncore_iio_freerunning_ops,
5870 .event_descs = spr_uncore_iio_freerunning_events,
5871 .format_group = &skx_uncore_iio_freerunning_format_group,
5874 enum perf_uncore_spr_imc_freerunning_type_id {
5878 SPR_IMC_FREERUNNING_TYPE_MAX,
5881 static struct freerunning_counters spr_imc_freerunning[] = {
5882 [SPR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5883 [SPR_IMC_PQ_CYCLES] = { 0x2318, 0x8, 0, 2, 48 },
5886 static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
5887 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5889 INTEL_UNCORE_EVENT_DESC(rpq_cycles, "event=0xff,umask=0x20"),
5890 INTEL_UNCORE_EVENT_DESC(wpq_cycles, "event=0xff,umask=0x21"),
5891 { /* end: all zeroes */ },
5894 #define SPR_MC_DEVICE_ID 0x3251
5896 static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5898 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;
5900 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5901 mem_offset, SPR_MC_DEVICE_ID);
5904 static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
5905 .init_box = spr_uncore_imc_freerunning_init_box,
5906 .exit_box = uncore_mmio_exit_box,
5907 .read_counter = uncore_mmio_read_counter,
5908 .hw_config = uncore_freerunning_hw_config,
5911 static struct intel_uncore_type spr_uncore_imc_free_running = {
5912 .name = "imc_free_running",
5914 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5915 .num_freerunning_types = SPR_IMC_FREERUNNING_TYPE_MAX,
5916 .freerunning = spr_imc_freerunning,
5917 .ops = &spr_uncore_imc_freerunning_ops,
5918 .event_descs = spr_uncore_imc_freerunning_events,
5919 .format_group = &skx_uncore_iio_freerunning_format_group,
5922 #define UNCORE_SPR_MSR_EXTRA_UNCORES 1
5923 #define UNCORE_SPR_MMIO_EXTRA_UNCORES 1
5925 static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
5926 &spr_uncore_iio_free_running,
5929 static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
5930 &spr_uncore_imc_free_running,
5933 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
5934 struct intel_uncore_type *from_type)
5936 if (!to_type || !from_type)
5939 if (from_type->name)
5940 to_type->name = from_type->name;
5941 if (from_type->fixed_ctr_bits)
5942 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
5943 if (from_type->event_mask)
5944 to_type->event_mask = from_type->event_mask;
5945 if (from_type->event_mask_ext)
5946 to_type->event_mask_ext = from_type->event_mask_ext;
5947 if (from_type->fixed_ctr)
5948 to_type->fixed_ctr = from_type->fixed_ctr;
5949 if (from_type->fixed_ctl)
5950 to_type->fixed_ctl = from_type->fixed_ctl;
5951 if (from_type->fixed_ctr_bits)
5952 to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
5953 if (from_type->num_shared_regs)
5954 to_type->num_shared_regs = from_type->num_shared_regs;
5955 if (from_type->constraints)
5956 to_type->constraints = from_type->constraints;
5958 to_type->ops = from_type->ops;
5959 if (from_type->event_descs)
5960 to_type->event_descs = from_type->event_descs;
5961 if (from_type->format_group)
5962 to_type->format_group = from_type->format_group;
5963 if (from_type->attr_update)
5964 to_type->attr_update = from_type->attr_update;
5967 static struct intel_uncore_type **
5968 uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
5969 struct intel_uncore_type **extra)
5971 struct intel_uncore_type **types, **start_types;
5974 start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
5976 /* Only copy the customized features */
5977 for (; *types; types++) {
5978 if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
5980 uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
5983 for (i = 0; i < num_extra; i++, types++)
5989 static struct intel_uncore_type *
5990 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
5992 for (; *types; types++) {
5993 if (type_id == (*types)->type_id)
6000 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6003 struct intel_uncore_type *type;
6006 type = uncore_find_type_by_id(types, type_id);
6010 for (i = 0; i < type->num_boxes; i++) {
6011 if (type->box_ids[i] > max)
6012 max = type->box_ids[i];
6018 void spr_uncore_cpu_init(void)
6020 uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6021 UNCORE_SPR_MSR_EXTRA_UNCORES,
6024 spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6027 int spr_uncore_pci_init(void)
6029 uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
6033 void spr_uncore_mmio_init(void)
6035 int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6038 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
6040 uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6041 UNCORE_SPR_MMIO_EXTRA_UNCORES,
6044 spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6048 /* end of SPR uncore support */