1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
4 #include "uncore_discovery.h"
6 /* SNB-EP pci bus to socket mapping */
7 #define SNBEP_CPUNODEID 0x40
8 #define SNBEP_GIDNIDMAP 0x54
10 /* SNB-EP Box level control */
11 #define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
12 #define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
13 #define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
14 #define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
15 #define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
16 SNBEP_PMON_BOX_CTL_RST_CTRS | \
17 SNBEP_PMON_BOX_CTL_FRZ_EN)
18 /* SNB-EP event control */
19 #define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
20 #define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
21 #define SNBEP_PMON_CTL_RST (1 << 17)
22 #define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
23 #define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
24 #define SNBEP_PMON_CTL_EN (1 << 22)
25 #define SNBEP_PMON_CTL_INVERT (1 << 23)
26 #define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
27 #define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
28 SNBEP_PMON_CTL_UMASK_MASK | \
29 SNBEP_PMON_CTL_EDGE_DET | \
30 SNBEP_PMON_CTL_INVERT | \
31 SNBEP_PMON_CTL_TRESH_MASK)
33 /* SNB-EP Ubox event control */
34 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
35 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
36 (SNBEP_PMON_CTL_EV_SEL_MASK | \
37 SNBEP_PMON_CTL_UMASK_MASK | \
38 SNBEP_PMON_CTL_EDGE_DET | \
39 SNBEP_PMON_CTL_INVERT | \
40 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
42 #define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
43 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
44 SNBEP_CBO_PMON_CTL_TID_EN)
46 /* SNB-EP PCU event control */
47 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
48 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
50 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
51 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
52 (SNBEP_PMON_CTL_EV_SEL_MASK | \
53 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
54 SNBEP_PMON_CTL_EDGE_DET | \
55 SNBEP_PMON_CTL_INVERT | \
56 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
57 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
58 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
60 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
61 (SNBEP_PMON_RAW_EVENT_MASK | \
62 SNBEP_PMON_CTL_EV_SEL_EXT)
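/*
 * Note: EV_SEL_EXT (bit 21) acts as a ninth event-select bit on the QPI
 * PMON blocks. The "event_ext" format attribute below exposes it as
 * config:0-7,21, which is why QPI events such as drs_data are encoded as
 * event=0x102 (event select 0x02 with the extended bit set).
 */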
64 /* SNB-EP pci control register */
65 #define SNBEP_PCI_PMON_BOX_CTL 0xf4
66 #define SNBEP_PCI_PMON_CTL0 0xd8
67 /* SNB-EP pci counter register */
68 #define SNBEP_PCI_PMON_CTR0 0xa0
70 /* SNB-EP home agent register */
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
72 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
73 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
74 /* SNB-EP memory controller register */
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
76 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
77 /* SNB-EP QPI register */
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
81 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
83 /* SNB-EP Ubox register */
84 #define SNBEP_U_MSR_PMON_CTR0 0xc16
85 #define SNBEP_U_MSR_PMON_CTL0 0xc10
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
88 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
90 /* SNB-EP Cbo register */
91 #define SNBEP_C0_MSR_PMON_CTR0 0xd16
92 #define SNBEP_C0_MSR_PMON_CTL0 0xd10
93 #define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
94 #define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
95 #define SNBEP_CBO_MSR_OFFSET 0x20
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
100 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
102 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
104 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
105 .config_mask = (m), \
109 /* SNB-EP PCU register */
110 #define SNBEP_PCU_MSR_PMON_CTR0 0xc36
111 #define SNBEP_PCU_MSR_PMON_CTL0 0xc30
112 #define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
114 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
115 #define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
116 #define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
118 /* IVBEP event control */
119 #define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
120 SNBEP_PMON_BOX_CTL_RST_CTRS)
121 #define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
122 SNBEP_PMON_CTL_UMASK_MASK | \
123 SNBEP_PMON_CTL_EDGE_DET | \
124 SNBEP_PMON_CTL_TRESH_MASK)
126 #define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
127 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
128 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
130 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
131 (SNBEP_PMON_CTL_EV_SEL_MASK | \
132 SNBEP_PMON_CTL_UMASK_MASK | \
133 SNBEP_PMON_CTL_EDGE_DET | \
134 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
136 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
137 SNBEP_CBO_PMON_CTL_TID_EN)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
146 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
148 /* IVBEP home agent */
149 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
150 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
151 (IVBEP_PMON_RAW_EVENT_MASK | \
152 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
154 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
155 (SNBEP_PMON_CTL_EV_SEL_MASK | \
156 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
157 SNBEP_PMON_CTL_EDGE_DET | \
158 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
159 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
160 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
162 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
163 (IVBEP_PMON_RAW_EVENT_MASK | \
164 SNBEP_PMON_CTL_EV_SEL_EXT)
166 #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
167 ((1ULL << (n)) - 1)))
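/*
 * Worked example: the Cbox constraint code below packs five 6-bit filter
 * reference counts into one atomic_t, so __BITS_VALUE(ref, 2, 6) extracts
 * bits [17:12]; e.g. __BITS_VALUE(0x41000, 2, 6) == 1, meaning filter
 * field 2 currently has a single user.
 */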
169 /* Haswell-EP Ubox */
170 #define HSWEP_U_MSR_PMON_CTR0 0x709
171 #define HSWEP_U_MSR_PMON_CTL0 0x705
172 #define HSWEP_U_MSR_PMON_FILTER 0x707
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
175 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
179 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
180 (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
181 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
184 #define HSWEP_C0_MSR_PMON_CTR0 0xe08
185 #define HSWEP_C0_MSR_PMON_CTL0 0xe01
186 #define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
187 #define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
188 #define HSWEP_CBO_MSR_OFFSET 0x10
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
198 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
201 /* Haswell-EP Sbox */
202 #define HSWEP_S0_MSR_PMON_CTR0 0x726
203 #define HSWEP_S0_MSR_PMON_CTL0 0x721
204 #define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
205 #define HSWEP_SBOX_MSR_OFFSET 0xa
206 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
207 SNBEP_CBO_PMON_CTL_TID_EN)
210 #define HSWEP_PCU_MSR_PMON_CTR0 0x717
211 #define HSWEP_PCU_MSR_PMON_CTL0 0x711
212 #define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
213 #define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
216 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
217 (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
218 SNBEP_CBO_PMON_CTL_TID_EN)
220 #define KNL_CHA_MSR_OFFSET 0xc
221 #define KNL_CHA_MSR_PMON_CTL_QOR (1 << 16)
222 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
223 (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
224 KNL_CHA_MSR_PMON_CTL_QOR)
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID 0x1ff
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE (7 << 18)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP (0xfffffe2aULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
230 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC (0x1ULL << 37)
232 /* KNL EDC/MC UCLK */
233 #define KNL_UCLK_MSR_PMON_CTR0_LOW 0x400
234 #define KNL_UCLK_MSR_PMON_CTL0 0x420
235 #define KNL_UCLK_MSR_PMON_BOX_CTL 0x430
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW 0x44c
237 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL 0x454
238 #define KNL_PMON_FIXED_CTL_EN 0x1
241 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW 0xa00
242 #define KNL_EDC0_ECLK_MSR_PMON_CTL0 0xa20
243 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL 0xa30
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW 0xa3c
245 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL 0xa44
248 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW 0xb00
249 #define KNL_MC0_CH0_MSR_PMON_CTL0 0xb20
250 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL 0xb30
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW 0xb3c
252 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL 0xb44
255 #define KNL_IRP_PCI_PMON_BOX_CTL 0xf0
256 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
257 KNL_CHA_MSR_PMON_CTL_QOR)
259 #define KNL_PCU_PMON_CTL_EV_SEL_MASK 0x0000007f
260 #define KNL_PCU_PMON_CTL_USE_OCC_CTR (1 << 7)
261 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK 0x3f000000
262 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
263 (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
264 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
265 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
266 SNBEP_PMON_CTL_EDGE_DET | \
267 SNBEP_CBO_PMON_CTL_TID_EN | \
268 SNBEP_PMON_CTL_INVERT | \
269 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
270 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
271 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
273 /* SKX pci bus to socket mapping */
274 #define SKX_CPUNODEID 0xc0
275 #define SKX_GIDNIDMAP 0xd4
278 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
279 * that BIOS programmed. MSR has package scope.
280 * | Bit | Default | Description
281 * | [63] | 00h | VALID - When set, indicates the CPU bus
282 * numbers have been initialized. (RO)
283 * |[62:48]| --- | Reserved
284 * |[47:40]| 00h | BUS_NUM_5 - Return the bus number BIOS assigned to CPUBUSNO(5). (RO)
286 * |[39:32]| 00h | BUS_NUM_4 - Return the bus number BIOS assigned to CPUBUSNO(4). (RO)
288 * |[31:24]| 00h | BUS_NUM_3 - Return the bus number BIOS assigned to CPUBUSNO(3). (RO)
290 * |[23:16]| 00h | BUS_NUM_2 - Return the bus number BIOS assigned to CPUBUSNO(2). (RO)
292 * |[15:8] | 00h | BUS_NUM_1 - Return the bus number BIOS assigned to CPUBUSNO(1). (RO)
294 * | [7:0] | 00h | BUS_NUM_0 - Return the bus number BIOS assigned to CPUBUSNO(0). (RO)
297 #define SKX_MSR_CPU_BUS_NUMBER 0x300
298 #define SKX_MSR_CPU_BUS_VALID_BIT (1ULL << 63)
299 #define BUS_NUM_STRIDE 8
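/*
 * Illustrative sketch (not part of the driver): how a BUS_NUM_n field would
 * be pulled out of a raw SKX_MSR_CPU_BUS_NUMBER value using the layout
 * documented above. The helper name is made up and the function is unused;
 * it only demonstrates the BUS_NUM_STRIDE-wide field packing.
 */
static inline u8 skx_cpu_bus_num_example(u64 msr_val, int idx)
{
	/* BIOS has not initialized the bus numbers yet */
	if (!(msr_val & SKX_MSR_CPU_BUS_VALID_BIT))
		return 0;

	/* each BUS_NUM_n field is BUS_NUM_STRIDE (8) bits wide */
	return (msr_val >> (idx * BUS_NUM_STRIDE)) & 0xff;
}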
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
314 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
317 #define SKX_IIO0_MSR_PMON_CTL0 0xa48
318 #define SKX_IIO0_MSR_PMON_CTR0 0xa41
319 #define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40
320 #define SKX_IIO_MSR_OFFSET 0x20
322 #define SKX_PMON_CTL_TRESH_MASK (0xff << 24)
323 #define SKX_PMON_CTL_TRESH_MASK_EXT (0xf)
324 #define SKX_PMON_CTL_CH_MASK (0xff << 4)
325 #define SKX_PMON_CTL_FC_MASK (0x7 << 12)
326 #define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
327 SNBEP_PMON_CTL_UMASK_MASK | \
328 SNBEP_PMON_CTL_EDGE_DET | \
329 SNBEP_PMON_CTL_INVERT | \
330 SKX_PMON_CTL_TRESH_MASK)
331 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
332 SKX_PMON_CTL_CH_MASK | \
333 SKX_PMON_CTL_FC_MASK)
336 #define SKX_IRP0_MSR_PMON_CTL0 0xa5b
337 #define SKX_IRP0_MSR_PMON_CTR0 0xa59
338 #define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58
339 #define SKX_IRP_MSR_OFFSET 0x20
342 #define SKX_UPI_PCI_PMON_CTL0 0x350
343 #define SKX_UPI_PCI_PMON_CTR0 0x318
344 #define SKX_UPI_PCI_PMON_BOX_CTL 0x378
345 #define SKX_UPI_CTL_UMASK_EXT 0xffefff
348 #define SKX_M2M_PCI_PMON_CTL0 0x228
349 #define SKX_M2M_PCI_PMON_CTR0 0x200
350 #define SKX_M2M_PCI_PMON_BOX_CTL 0x258
352 /* Memory Map registers device ID */
353 #define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
354 #define SNR_ICX_SAD_CONTROL_CFG 0x3f4
356 /* Getting I/O stack id in SAD_CONTROL_CFG notation */
357 #define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
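/*
 * Example: a SAD_CONTROL_CFG value of 0x35 gives SAD_CONTROL_STACK_ID(0x35)
 * == (0x35 >> 4) & 0x7 == 3, i.e. the device belongs to I/O stack 3.
 */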
360 #define SNR_U_MSR_PMON_CTR0 0x1f98
361 #define SNR_U_MSR_PMON_CTL0 0x1f91
362 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL 0x1f93
363 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR 0x1f94
366 #define SNR_CHA_RAW_EVENT_MASK_EXT 0x3ffffff
367 #define SNR_CHA_MSR_PMON_CTL0 0x1c01
368 #define SNR_CHA_MSR_PMON_CTR0 0x1c08
369 #define SNR_CHA_MSR_PMON_BOX_CTL 0x1c00
370 #define SNR_C0_MSR_PMON_BOX_FILTER0 0x1c05
374 #define SNR_IIO_MSR_PMON_CTL0 0x1e08
375 #define SNR_IIO_MSR_PMON_CTR0 0x1e01
376 #define SNR_IIO_MSR_PMON_BOX_CTL 0x1e00
377 #define SNR_IIO_MSR_OFFSET 0x10
378 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT 0x7ffff
381 #define SNR_IRP0_MSR_PMON_CTL0 0x1ea8
382 #define SNR_IRP0_MSR_PMON_CTR0 0x1ea1
383 #define SNR_IRP0_MSR_PMON_BOX_CTL 0x1ea0
384 #define SNR_IRP_MSR_OFFSET 0x10
387 #define SNR_M2PCIE_MSR_PMON_CTL0 0x1e58
388 #define SNR_M2PCIE_MSR_PMON_CTR0 0x1e51
389 #define SNR_M2PCIE_MSR_PMON_BOX_CTL 0x1e50
390 #define SNR_M2PCIE_MSR_OFFSET 0x10
393 #define SNR_PCU_MSR_PMON_CTL0 0x1ef1
394 #define SNR_PCU_MSR_PMON_CTR0 0x1ef8
395 #define SNR_PCU_MSR_PMON_BOX_CTL 0x1ef0
396 #define SNR_PCU_MSR_PMON_BOX_FILTER 0x1efc
399 #define SNR_M2M_PCI_PMON_CTL0 0x468
400 #define SNR_M2M_PCI_PMON_CTR0 0x440
401 #define SNR_M2M_PCI_PMON_BOX_CTL 0x438
402 #define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
405 #define SNR_PCIE3_PCI_PMON_CTL0 0x508
406 #define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
407 #define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e0
410 #define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
411 #define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
412 #define SNR_IMC_MMIO_PMON_CTL0 0x40
413 #define SNR_IMC_MMIO_PMON_CTR0 0x8
414 #define SNR_IMC_MMIO_PMON_BOX_CTL 0x22800
415 #define SNR_IMC_MMIO_OFFSET 0x4000
416 #define SNR_IMC_MMIO_SIZE 0x4000
417 #define SNR_IMC_MMIO_BASE_OFFSET 0xd0
418 #define SNR_IMC_MMIO_BASE_MASK 0x1FFFFFFF
419 #define SNR_IMC_MMIO_MEM0_OFFSET 0xd8
420 #define SNR_IMC_MMIO_MEM0_MASK 0x7FF
423 #define ICX_C34_MSR_PMON_CTR0 0xb68
424 #define ICX_C34_MSR_PMON_CTL0 0xb61
425 #define ICX_C34_MSR_PMON_BOX_CTL 0xb60
426 #define ICX_C34_MSR_PMON_BOX_FILTER0 0xb65
429 #define ICX_IIO_MSR_PMON_CTL0 0xa58
430 #define ICX_IIO_MSR_PMON_CTR0 0xa51
431 #define ICX_IIO_MSR_PMON_BOX_CTL 0xa50
434 #define ICX_IRP0_MSR_PMON_CTL0 0xa4d
435 #define ICX_IRP0_MSR_PMON_CTR0 0xa4b
436 #define ICX_IRP0_MSR_PMON_BOX_CTL 0xa4a
439 #define ICX_M2PCIE_MSR_PMON_CTL0 0xa46
440 #define ICX_M2PCIE_MSR_PMON_CTR0 0xa41
441 #define ICX_M2PCIE_MSR_PMON_BOX_CTL 0xa40
444 #define ICX_UPI_PCI_PMON_CTL0 0x350
445 #define ICX_UPI_PCI_PMON_CTR0 0x320
446 #define ICX_UPI_PCI_PMON_BOX_CTL 0x318
447 #define ICX_UPI_CTL_UMASK_EXT 0xffffff
450 #define ICX_M3UPI_PCI_PMON_CTL0 0xd8
451 #define ICX_M3UPI_PCI_PMON_CTR0 0xa8
452 #define ICX_M3UPI_PCI_PMON_BOX_CTL 0xa0
455 #define ICX_NUMBER_IMC_CHN 2
456 #define ICX_IMC_MEM_STRIDE 0x4
459 #define SPR_RAW_EVENT_MASK_EXT 0xffffff
462 #define SPR_CHA_PMON_CTL_TID_EN (1 << 16)
463 #define SPR_CHA_PMON_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
464 SPR_CHA_PMON_CTL_TID_EN)
465 #define SPR_CHA_PMON_BOX_FILTER_TID 0x3ff
467 #define SPR_C0_MSR_PMON_BOX_FILTER0 0x200e
469 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
470 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
471 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
472 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
473 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
474 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
475 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
476 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
477 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
478 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
479 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
480 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
481 DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
482 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
483 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
484 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
485 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
486 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
487 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
488 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
489 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
490 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
491 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
492 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
493 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
494 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
495 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
496 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
505 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
506 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
507 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
508 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
509 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
510 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
511 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
512 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
513 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
514 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
515 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
516 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
517 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
518 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
519 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
520 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
521 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
522 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
523 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
524 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
525 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
526 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
527 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
528 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
529 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
530 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
531 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
532 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
533 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
534 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
535 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
536 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
537 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
538 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
539 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
540 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
541 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
542 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
543 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
544 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
545 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
546 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
547 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
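/*
 * Illustrative sketch (not part of the driver): the format attributes above
 * tell perf how to pack an event string into the config layout defined by
 * the SNBEP_PMON_CTL_* masks. For the SNB-EP iMC, "event=0x4,umask=0x3"
 * (the cas_count_read alias below) lands in bits 0-7 and 8-15 of the
 * control register. The helper name is made up and the function is unused.
 */
static inline u32 snbep_example_encode(u8 ev, u8 umask)
{
	return (ev & SNBEP_PMON_CTL_EV_SEL_MASK) |
	       ((umask << 8) & SNBEP_PMON_CTL_UMASK_MASK);
}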
549 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
551 struct pci_dev *pdev = box->pci_dev;
552 int box_ctl = uncore_pci_box_ctl(box);
555 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
556 config |= SNBEP_PMON_BOX_CTL_FRZ;
557 pci_write_config_dword(pdev, box_ctl, config);
561 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
563 struct pci_dev *pdev = box->pci_dev;
564 int box_ctl = uncore_pci_box_ctl(box);
567 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
568 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
569 pci_write_config_dword(pdev, box_ctl, config);
573 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
575 struct pci_dev *pdev = box->pci_dev;
576 struct hw_perf_event *hwc = &event->hw;
578 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
581 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
583 struct pci_dev *pdev = box->pci_dev;
584 struct hw_perf_event *hwc = &event->hw;
586 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
589 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
591 struct pci_dev *pdev = box->pci_dev;
592 struct hw_perf_event *hwc = &event->hw;
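	/*
	 * The PMON counters are wider than 32 bits (44-48 bits on SNB-EP),
	 * so read the low and high dwords separately.
	 */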
595 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
596 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
601 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
603 struct pci_dev *pdev = box->pci_dev;
604 int box_ctl = uncore_pci_box_ctl(box);
606 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
609 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
614 msr = uncore_msr_box_ctl(box);
617 config |= SNBEP_PMON_BOX_CTL_FRZ;
622 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
627 msr = uncore_msr_box_ctl(box);
630 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
635 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
637 struct hw_perf_event *hwc = &event->hw;
638 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
640 if (reg1->idx != EXTRA_REG_NONE)
641 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
643 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
646 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
647 struct perf_event *event)
649 struct hw_perf_event *hwc = &event->hw;
651 wrmsrl(hwc->config_base, hwc->config);
654 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
656 unsigned msr = uncore_msr_box_ctl(box);
659 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
662 static struct attribute *snbep_uncore_formats_attr[] = {
663 &format_attr_event.attr,
664 &format_attr_umask.attr,
665 &format_attr_edge.attr,
666 &format_attr_inv.attr,
667 &format_attr_thresh8.attr,
671 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
672 &format_attr_event.attr,
673 &format_attr_umask.attr,
674 &format_attr_edge.attr,
675 &format_attr_inv.attr,
676 &format_attr_thresh5.attr,
680 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
681 &format_attr_event.attr,
682 &format_attr_umask.attr,
683 &format_attr_edge.attr,
684 &format_attr_tid_en.attr,
685 &format_attr_inv.attr,
686 &format_attr_thresh8.attr,
687 &format_attr_filter_tid.attr,
688 &format_attr_filter_nid.attr,
689 &format_attr_filter_state.attr,
690 &format_attr_filter_opc.attr,
694 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
695 &format_attr_event.attr,
696 &format_attr_occ_sel.attr,
697 &format_attr_edge.attr,
698 &format_attr_inv.attr,
699 &format_attr_thresh5.attr,
700 &format_attr_occ_invert.attr,
701 &format_attr_occ_edge.attr,
702 &format_attr_filter_band0.attr,
703 &format_attr_filter_band1.attr,
704 &format_attr_filter_band2.attr,
705 &format_attr_filter_band3.attr,
709 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
710 &format_attr_event_ext.attr,
711 &format_attr_umask.attr,
712 &format_attr_edge.attr,
713 &format_attr_inv.attr,
714 &format_attr_thresh8.attr,
715 &format_attr_match_rds.attr,
716 &format_attr_match_rnid30.attr,
717 &format_attr_match_rnid4.attr,
718 &format_attr_match_dnid.attr,
719 &format_attr_match_mc.attr,
720 &format_attr_match_opc.attr,
721 &format_attr_match_vnw.attr,
722 &format_attr_match0.attr,
723 &format_attr_match1.attr,
724 &format_attr_mask_rds.attr,
725 &format_attr_mask_rnid30.attr,
726 &format_attr_mask_rnid4.attr,
727 &format_attr_mask_dnid.attr,
728 &format_attr_mask_mc.attr,
729 &format_attr_mask_opc.attr,
730 &format_attr_mask_vnw.attr,
731 &format_attr_mask0.attr,
732 &format_attr_mask1.attr,
736 static struct uncore_event_desc snbep_uncore_imc_events[] = {
737 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
738 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
739 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
740 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
741 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
742 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
743 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
744 { /* end: all zeroes */ },
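/*
 * The 6.103515625e-5 scale above is 64 bytes per CAS transaction divided by
 * 2^20, so the cas_count_read/cas_count_write aliases report MiB directly.
 */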
747 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
748 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
749 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
750 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
751 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
752 { /* end: all zeroes */ },
755 static const struct attribute_group snbep_uncore_format_group = {
757 .attrs = snbep_uncore_formats_attr,
760 static const struct attribute_group snbep_uncore_ubox_format_group = {
762 .attrs = snbep_uncore_ubox_formats_attr,
765 static const struct attribute_group snbep_uncore_cbox_format_group = {
767 .attrs = snbep_uncore_cbox_formats_attr,
770 static const struct attribute_group snbep_uncore_pcu_format_group = {
772 .attrs = snbep_uncore_pcu_formats_attr,
775 static const struct attribute_group snbep_uncore_qpi_format_group = {
777 .attrs = snbep_uncore_qpi_formats_attr,
780 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
781 .disable_box = snbep_uncore_msr_disable_box, \
782 .enable_box = snbep_uncore_msr_enable_box, \
783 .disable_event = snbep_uncore_msr_disable_event, \
784 .enable_event = snbep_uncore_msr_enable_event, \
785 .read_counter = uncore_msr_read_counter
787 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
788 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
789 .init_box = snbep_uncore_msr_init_box \
791 static struct intel_uncore_ops snbep_uncore_msr_ops = {
792 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
795 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
796 .init_box = snbep_uncore_pci_init_box, \
797 .disable_box = snbep_uncore_pci_disable_box, \
798 .enable_box = snbep_uncore_pci_enable_box, \
799 .disable_event = snbep_uncore_pci_disable_event, \
800 .read_counter = snbep_uncore_pci_read_counter
802 static struct intel_uncore_ops snbep_uncore_pci_ops = {
803 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
804 .enable_event = snbep_uncore_pci_enable_event,
807 static struct event_constraint snbep_uncore_cbox_constraints[] = {
808 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
809 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
810 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
811 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
812 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
813 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
814 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
815 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
816 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
817 UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
818 UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
819 UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
820 UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
821 UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
822 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
823 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
824 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
825 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
826 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
827 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
828 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
829 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
830 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
831 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
832 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
833 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
837 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
838 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
839 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
840 UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
841 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
842 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
843 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
844 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
845 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
846 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
847 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
851 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
852 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
853 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
854 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
855 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
856 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
857 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
858 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
859 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
860 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
861 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
862 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
863 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
864 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
865 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
866 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
867 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
868 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
869 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
870 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
871 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
872 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
873 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
874 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
875 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
876 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
877 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
878 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
879 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
883 static struct intel_uncore_type snbep_uncore_ubox = {
888 .fixed_ctr_bits = 48,
889 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
890 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
891 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
892 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
893 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
894 .ops = &snbep_uncore_msr_ops,
895 .format_group = &snbep_uncore_ubox_format_group,
898 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
899 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
900 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
901 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
902 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
903 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
904 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
905 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
906 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
907 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
908 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
909 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
910 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
911 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
912 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
913 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
914 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
915 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
916 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
917 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
918 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
919 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
920 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
921 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
922 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
923 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
927 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
929 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
930 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
933 if (uncore_box_is_fake(box))
936 for (i = 0; i < 5; i++) {
937 if (reg1->alloc & (0x1 << i))
938 atomic_sub(1 << (i * 6), &er->ref);
943 static struct event_constraint *
944 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
945 u64 (*cbox_filter_mask)(int fields))
947 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
948 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
953 if (reg1->idx == EXTRA_REG_NONE)
956 raw_spin_lock_irqsave(&er->lock, flags);
957 for (i = 0; i < 5; i++) {
958 if (!(reg1->idx & (0x1 << i)))
960 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
963 mask = cbox_filter_mask(0x1 << i);
964 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
965 !((reg1->config ^ er->config) & mask)) {
966 atomic_add(1 << (i * 6), &er->ref);
968 er->config |= reg1->config & mask;
974 raw_spin_unlock_irqrestore(&er->lock, flags);
978 if (!uncore_box_is_fake(box))
979 reg1->alloc |= alloc;
983 for (; i >= 0; i--) {
984 if (alloc & (0x1 << i))
985 atomic_sub(1 << (i * 6), &er->ref);
987 return &uncore_constraint_empty;
990 static u64 snbep_cbox_filter_mask(int fields)
995 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
997 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
999 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1001 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1006 static struct event_constraint *
1007 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1009 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
1012 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1014 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1015 struct extra_reg *er;
1018 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
1019 if (er->event != (event->hw.config & er->config_mask))
1025 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1026 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1027 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
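	/*
	 * Example: with SNBEP_C0_MSR_PMON_BOX_FILTER = 0xd14 and
	 * SNBEP_CBO_MSR_OFFSET = 0x20, Cbox 3 (pmu_idx == 3) programs its
	 * filter at MSR 0xd14 + 3 * 0x20 = 0xd74.
	 */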
1033 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
1034 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1035 .hw_config = snbep_cbox_hw_config,
1036 .get_constraint = snbep_cbox_get_constraint,
1037 .put_constraint = snbep_cbox_put_constraint,
1040 static struct intel_uncore_type snbep_uncore_cbox = {
1044 .perf_ctr_bits = 44,
1045 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1046 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1047 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1048 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1049 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1050 .num_shared_regs = 1,
1051 .constraints = snbep_uncore_cbox_constraints,
1052 .ops = &snbep_uncore_cbox_ops,
1053 .format_group = &snbep_uncore_cbox_format_group,
1056 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1058 struct hw_perf_event *hwc = &event->hw;
1059 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1060 u64 config = reg1->config;
1062 if (new_idx > reg1->idx)
1063 config <<= 8 * (new_idx - reg1->idx);
1065 config >>= 8 * (reg1->idx - new_idx);
1068 hwc->config += new_idx - reg1->idx;
1069 reg1->config = config;
1070 reg1->idx = new_idx;
1075 static struct event_constraint *
1076 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1078 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1079 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1080 unsigned long flags;
1081 int idx = reg1->idx;
1082 u64 mask, config1 = reg1->config;
1085 if (reg1->idx == EXTRA_REG_NONE ||
1086 (!uncore_box_is_fake(box) && reg1->alloc))
1089 mask = 0xffULL << (idx * 8);
1090 raw_spin_lock_irqsave(&er->lock, flags);
1091 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
1092 !((config1 ^ er->config) & mask)) {
1093 atomic_add(1 << (idx * 8), &er->ref);
1094 er->config &= ~mask;
1095 er->config |= config1 & mask;
1098 raw_spin_unlock_irqrestore(&er->lock, flags);
1101 idx = (idx + 1) % 4;
1102 if (idx != reg1->idx) {
1103 config1 = snbep_pcu_alter_er(event, idx, false);
1106 return &uncore_constraint_empty;
1109 if (!uncore_box_is_fake(box)) {
1110 if (idx != reg1->idx)
1111 snbep_pcu_alter_er(event, idx, true);
1117 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1119 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1120 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1122 if (uncore_box_is_fake(box) || !reg1->alloc)
1125 atomic_sub(1 << (reg1->idx * 8), &er->ref);
1129 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1131 struct hw_perf_event *hwc = &event->hw;
1132 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1133 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1135 if (ev_sel >= 0xb && ev_sel <= 0xe) {
1136 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1137 reg1->idx = ev_sel - 0xb;
1138 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
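	/*
	 * Example: ev_sel 0xd yields idx = 2, so only config1 bits 16-23
	 * (filter_band2 in the format attributes above) are kept for the
	 * PCU filter register.
	 */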
1143 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
1144 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1145 .hw_config = snbep_pcu_hw_config,
1146 .get_constraint = snbep_pcu_get_constraint,
1147 .put_constraint = snbep_pcu_put_constraint,
1150 static struct intel_uncore_type snbep_uncore_pcu = {
1154 .perf_ctr_bits = 48,
1155 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1156 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1157 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1158 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1159 .num_shared_regs = 1,
1160 .ops = &snbep_uncore_pcu_ops,
1161 .format_group = &snbep_uncore_pcu_format_group,
1164 static struct intel_uncore_type *snbep_msr_uncores[] = {
1171 void snbep_uncore_cpu_init(void)
1173 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1174 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1175 uncore_msr_uncores = snbep_msr_uncores;
1179 SNBEP_PCI_QPI_PORT0_FILTER,
1180 SNBEP_PCI_QPI_PORT1_FILTER,
1181 BDX_PCI_QPI_PORT2_FILTER,
1184 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1186 struct hw_perf_event *hwc = &event->hw;
1187 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1188 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1190 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1192 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1193 reg1->config = event->attr.config1;
1194 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1195 reg2->config = event->attr.config2;
1200 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1202 struct pci_dev *pdev = box->pci_dev;
1203 struct hw_perf_event *hwc = &event->hw;
1204 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1205 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
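	/*
	 * The QPI match/mask registers live on a companion "filter" PCI
	 * device (ids 0x3c86/0x3c96 in the id table below), stashed in
	 * uncore_extra_pci_dev[] at probe time and looked up here by die.
	 */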
1207 if (reg1->idx != EXTRA_REG_NONE) {
1208 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1209 int die = box->dieid;
1210 struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1213 pci_write_config_dword(filter_pdev, reg1->reg,
1215 pci_write_config_dword(filter_pdev, reg1->reg + 4,
1216 (u32)(reg1->config >> 32));
1217 pci_write_config_dword(filter_pdev, reg2->reg,
1219 pci_write_config_dword(filter_pdev, reg2->reg + 4,
1220 (u32)(reg2->config >> 32));
1224 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1227 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1228 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1229 .enable_event = snbep_qpi_enable_event,
1230 .hw_config = snbep_qpi_hw_config,
1231 .get_constraint = uncore_get_constraint,
1232 .put_constraint = uncore_put_constraint,
1235 #define SNBEP_UNCORE_PCI_COMMON_INIT() \
1236 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1237 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1238 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
1239 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1240 .ops = &snbep_uncore_pci_ops, \
1241 .format_group = &snbep_uncore_format_group
1243 static struct intel_uncore_type snbep_uncore_ha = {
1247 .perf_ctr_bits = 48,
1248 SNBEP_UNCORE_PCI_COMMON_INIT(),
1251 static struct intel_uncore_type snbep_uncore_imc = {
1255 .perf_ctr_bits = 48,
1256 .fixed_ctr_bits = 48,
1257 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1258 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1259 .event_descs = snbep_uncore_imc_events,
1260 SNBEP_UNCORE_PCI_COMMON_INIT(),
1263 static struct intel_uncore_type snbep_uncore_qpi = {
1267 .perf_ctr_bits = 48,
1268 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1269 .event_ctl = SNBEP_PCI_PMON_CTL0,
1270 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1271 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1272 .num_shared_regs = 1,
1273 .ops = &snbep_uncore_qpi_ops,
1274 .event_descs = snbep_uncore_qpi_events,
1275 .format_group = &snbep_uncore_qpi_format_group,
1279 static struct intel_uncore_type snbep_uncore_r2pcie = {
1283 .perf_ctr_bits = 44,
1284 .constraints = snbep_uncore_r2pcie_constraints,
1285 SNBEP_UNCORE_PCI_COMMON_INIT(),
1288 static struct intel_uncore_type snbep_uncore_r3qpi = {
1292 .perf_ctr_bits = 44,
1293 .constraints = snbep_uncore_r3qpi_constraints,
1294 SNBEP_UNCORE_PCI_COMMON_INIT(),
1298 SNBEP_PCI_UNCORE_HA,
1299 SNBEP_PCI_UNCORE_IMC,
1300 SNBEP_PCI_UNCORE_QPI,
1301 SNBEP_PCI_UNCORE_R2PCIE,
1302 SNBEP_PCI_UNCORE_R3QPI,
1305 static struct intel_uncore_type *snbep_pci_uncores[] = {
1306 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
1307 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
1308 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
1309 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
1310 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
1314 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1316 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1317 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1319 { /* MC Channel 0 */
1320 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1321 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1323 { /* MC Channel 1 */
1324 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1325 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1327 { /* MC Channel 2 */
1328 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1329 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1331 { /* MC Channel 3 */
1332 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1333 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1336 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1337 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1340 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1341 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1344 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1345 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1347 { /* R3QPI Link 0 */
1348 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1349 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1351 { /* R3QPI Link 1 */
1352 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1353 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1355 { /* QPI Port 0 filter */
1356 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1357 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1358 SNBEP_PCI_QPI_PORT0_FILTER),
1360 { /* QPI Port 1 filter */
1361 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1362 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1363 SNBEP_PCI_QPI_PORT1_FILTER),
1365 { /* end: all zeroes */ }
1368 static struct pci_driver snbep_uncore_pci_driver = {
1369 .name = "snbep_uncore",
1370 .id_table = snbep_uncore_pci_ids,
1373 #define NODE_ID_MASK 0x7
1376 * build pci bus to socket mapping
1378 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1380 struct pci_dev *ubox_dev = NULL;
1381 int i, bus, nodeid, segment, die_id;
1382 struct pci2phy_map *map;
1387 /* find the UBOX device */
1388 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1391 bus = ubox_dev->bus->number;
1393 * The nodeid and idmap registers only contain enough
1394 * information to handle 8 nodes. On systems with more
1395 * than 8 nodes, we need to rely on NUMA information,
1396 * filled in from BIOS-supplied information, to determine the topology.
1399 if (nr_node_ids <= 8) {
1400 /* get the Node ID of the local register */
1401 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1404 nodeid = config & NODE_ID_MASK;
1405 /* get the Node ID mapping */
1406 err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1410 segment = pci_domain_nr(ubox_dev->bus);
1411 raw_spin_lock(&pci2phy_map_lock);
1412 map = __find_pci2phy_map(segment);
1414 raw_spin_unlock(&pci2phy_map_lock);
1420 * each three-bit field in the Node ID mapping register maps
1421 * to a particular node.
1423 for (i = 0; i < 8; i++) {
1424 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1425 if (topology_max_die_per_package() > 1)
1428 die_id = topology_phys_to_logical_pkg(i);
1429 map->pbus_to_dieid[bus] = die_id;
1433 raw_spin_unlock(&pci2phy_map_lock);
1435 int node = pcibus_to_node(ubox_dev->bus);
1438 segment = pci_domain_nr(ubox_dev->bus);
1439 raw_spin_lock(&pci2phy_map_lock);
1440 map = __find_pci2phy_map(segment);
1442 raw_spin_unlock(&pci2phy_map_lock);
1448 for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1449 struct cpuinfo_x86 *c = &cpu_data(cpu);
1451 if (c->initialized && cpu_to_node(cpu) == node) {
1452 map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1456 raw_spin_unlock(&pci2phy_map_lock);
1458 if (WARN_ON_ONCE(die_id == -1)) {
1467 * For a PCI bus with no UBOX device, find the next bus
1468 * that has a UBOX device and use its mapping.
1470 raw_spin_lock(&pci2phy_map_lock);
1471 list_for_each_entry(map, &pci2phy_map_head, list) {
1474 for (bus = 255; bus >= 0; bus--) {
1475 if (map->pbus_to_dieid[bus] >= 0)
1476 i = map->pbus_to_dieid[bus];
1478 map->pbus_to_dieid[bus] = i;
1481 for (bus = 0; bus <= 255; bus++) {
1482 if (map->pbus_to_dieid[bus] >= 0)
1483 i = map->pbus_to_dieid[bus];
1485 map->pbus_to_dieid[bus] = i;
1489 raw_spin_unlock(&pci2phy_map_lock);
1492 pci_dev_put(ubox_dev);
1494 return err ? pcibios_err_to_errno(err) : 0;
1497 int snbep_uncore_pci_init(void)
1499 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1502 uncore_pci_uncores = snbep_pci_uncores;
1503 uncore_pci_driver = &snbep_uncore_pci_driver;
1506 /* end of Sandy Bridge-EP uncore support */
1508 /* IvyTown uncore support */
1509 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1511 unsigned msr = uncore_msr_box_ctl(box);
1513 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1516 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1518 struct pci_dev *pdev = box->pci_dev;
1520 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1523 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
1524 .init_box = ivbep_uncore_msr_init_box, \
1525 .disable_box = snbep_uncore_msr_disable_box, \
1526 .enable_box = snbep_uncore_msr_enable_box, \
1527 .disable_event = snbep_uncore_msr_disable_event, \
1528 .enable_event = snbep_uncore_msr_enable_event, \
1529 .read_counter = uncore_msr_read_counter
1531 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1532 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1535 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1536 .init_box = ivbep_uncore_pci_init_box,
1537 .disable_box = snbep_uncore_pci_disable_box,
1538 .enable_box = snbep_uncore_pci_enable_box,
1539 .disable_event = snbep_uncore_pci_disable_event,
1540 .enable_event = snbep_uncore_pci_enable_event,
1541 .read_counter = snbep_uncore_pci_read_counter,
1544 #define IVBEP_UNCORE_PCI_COMMON_INIT() \
1545 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1546 .event_ctl = SNBEP_PCI_PMON_CTL0, \
1547 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
1548 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
1549 .ops = &ivbep_uncore_pci_ops, \
1550 .format_group = &ivbep_uncore_format_group
1552 static struct attribute *ivbep_uncore_formats_attr[] = {
1553 &format_attr_event.attr,
1554 &format_attr_umask.attr,
1555 &format_attr_edge.attr,
1556 &format_attr_inv.attr,
1557 &format_attr_thresh8.attr,
1561 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1562 &format_attr_event.attr,
1563 &format_attr_umask.attr,
1564 &format_attr_edge.attr,
1565 &format_attr_inv.attr,
1566 &format_attr_thresh5.attr,
1570 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1571 &format_attr_event.attr,
1572 &format_attr_umask.attr,
1573 &format_attr_edge.attr,
1574 &format_attr_tid_en.attr,
1575 &format_attr_thresh8.attr,
1576 &format_attr_filter_tid.attr,
1577 &format_attr_filter_link.attr,
1578 &format_attr_filter_state2.attr,
1579 &format_attr_filter_nid2.attr,
1580 &format_attr_filter_opc2.attr,
1581 &format_attr_filter_nc.attr,
1582 &format_attr_filter_c6.attr,
1583 &format_attr_filter_isoc.attr,
1587 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1588 &format_attr_event.attr,
1589 &format_attr_occ_sel.attr,
1590 &format_attr_edge.attr,
1591 &format_attr_thresh5.attr,
1592 &format_attr_occ_invert.attr,
1593 &format_attr_occ_edge.attr,
1594 &format_attr_filter_band0.attr,
1595 &format_attr_filter_band1.attr,
1596 &format_attr_filter_band2.attr,
1597 &format_attr_filter_band3.attr,
1601 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1602 &format_attr_event_ext.attr,
1603 &format_attr_umask.attr,
1604 &format_attr_edge.attr,
1605 &format_attr_thresh8.attr,
1606 &format_attr_match_rds.attr,
1607 &format_attr_match_rnid30.attr,
1608 &format_attr_match_rnid4.attr,
1609 &format_attr_match_dnid.attr,
1610 &format_attr_match_mc.attr,
1611 &format_attr_match_opc.attr,
1612 &format_attr_match_vnw.attr,
1613 &format_attr_match0.attr,
1614 &format_attr_match1.attr,
1615 &format_attr_mask_rds.attr,
1616 &format_attr_mask_rnid30.attr,
1617 &format_attr_mask_rnid4.attr,
1618 &format_attr_mask_dnid.attr,
1619 &format_attr_mask_mc.attr,
1620 &format_attr_mask_opc.attr,
1621 &format_attr_mask_vnw.attr,
1622 &format_attr_mask0.attr,
1623 &format_attr_mask1.attr,
1627 static const struct attribute_group ivbep_uncore_format_group = {
1629 .attrs = ivbep_uncore_formats_attr,
1632 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1634 .attrs = ivbep_uncore_ubox_formats_attr,
1637 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1639 .attrs = ivbep_uncore_cbox_formats_attr,
1642 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1644 .attrs = ivbep_uncore_pcu_formats_attr,
1647 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1649 .attrs = ivbep_uncore_qpi_formats_attr,
1652 static struct intel_uncore_type ivbep_uncore_ubox = {
1656 .perf_ctr_bits = 44,
1657 .fixed_ctr_bits = 48,
1658 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1659 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1660 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1661 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1662 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1663 .ops = &ivbep_uncore_msr_ops,
1664 .format_group = &ivbep_uncore_ubox_format_group,
1667 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1668 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1669 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1670 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1671 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1672 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1673 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1674 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1675 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1676 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1677 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1678 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1679 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1680 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1681 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1682 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1683 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1684 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1685 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1686 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1687 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1688 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1689 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1690 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1691 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1692 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1693 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1694 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1695 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1696 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1697 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1698 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1699 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1700 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1701 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1702 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1703 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1704 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1708 static u64 ivbep_cbox_filter_mask(int fields)
1713 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1715 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1717 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1719 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1720 if (fields & 0x10) {
1721 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1722 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1723 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1724 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1730 static struct event_constraint *
1731 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1733 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1736 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1738 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1739 struct extra_reg *er;
1742 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1743 if (er->event != (event->hw.config & er->config_mask))
1749 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1750 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1751 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1757 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1759 struct hw_perf_event *hwc = &event->hw;
1760 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1762 if (reg1->idx != EXTRA_REG_NONE) {
1763 u64 filter = uncore_shared_reg_config(box, 0);
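		/*
		 * The 64-bit filter value is split across two MSRs: the low
		 * half goes to the first Cbox filter register and the high
		 * half to the second one, which on IvyTown sits 6 MSR
		 * addresses above it (hence the reg1->reg + 6 below).
		 */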
1764 wrmsrl(reg1->reg, filter & 0xffffffff);
1765 wrmsrl(reg1->reg + 6, filter >> 32);
1768 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1771 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1772 .init_box = ivbep_uncore_msr_init_box,
1773 .disable_box = snbep_uncore_msr_disable_box,
1774 .enable_box = snbep_uncore_msr_enable_box,
1775 .disable_event = snbep_uncore_msr_disable_event,
1776 .enable_event = ivbep_cbox_enable_event,
1777 .read_counter = uncore_msr_read_counter,
1778 .hw_config = ivbep_cbox_hw_config,
1779 .get_constraint = ivbep_cbox_get_constraint,
1780 .put_constraint = snbep_cbox_put_constraint,
1783 static struct intel_uncore_type ivbep_uncore_cbox = {
1787 .perf_ctr_bits = 44,
1788 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1789 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1790 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1791 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1792 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1793 .num_shared_regs = 1,
1794 .constraints = snbep_uncore_cbox_constraints,
1795 .ops = &ivbep_uncore_cbox_ops,
1796 .format_group = &ivbep_uncore_cbox_format_group,
1799 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1800 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1801 .hw_config = snbep_pcu_hw_config,
1802 .get_constraint = snbep_pcu_get_constraint,
1803 .put_constraint = snbep_pcu_put_constraint,
1806 static struct intel_uncore_type ivbep_uncore_pcu = {
1810 .perf_ctr_bits = 48,
1811 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1812 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1813 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1814 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1815 .num_shared_regs = 1,
1816 .ops = &ivbep_uncore_pcu_ops,
1817 .format_group = &ivbep_uncore_pcu_format_group,
1820 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1827 void ivbep_uncore_cpu_init(void)
1829 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1830 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1831 uncore_msr_uncores = ivbep_msr_uncores;
1834 static struct intel_uncore_type ivbep_uncore_ha = {
1838 .perf_ctr_bits = 48,
1839 IVBEP_UNCORE_PCI_COMMON_INIT(),
1842 static struct intel_uncore_type ivbep_uncore_imc = {
1846 .perf_ctr_bits = 48,
1847 .fixed_ctr_bits = 48,
1848 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1849 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1850 .event_descs = snbep_uncore_imc_events,
1851 IVBEP_UNCORE_PCI_COMMON_INIT(),
1854 /* registers in IRP boxes are not properly aligned */
1855 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1856 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
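/* Per-counter control/counter offsets are looked up by counter index rather than derived from a fixed stride */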
1858 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1860 struct pci_dev *pdev = box->pci_dev;
1861 struct hw_perf_event *hwc = &event->hw;
1863 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1864 hwc->config | SNBEP_PMON_CTL_EN);
1867 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1869 struct pci_dev *pdev = box->pci_dev;
1870 struct hw_perf_event *hwc = &event->hw;
1872 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1875 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1877 struct pci_dev *pdev = box->pci_dev;
1878 struct hw_perf_event *hwc = &event->hw;
1881 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1882 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1887 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1888 .init_box = ivbep_uncore_pci_init_box,
1889 .disable_box = snbep_uncore_pci_disable_box,
1890 .enable_box = snbep_uncore_pci_enable_box,
1891 .disable_event = ivbep_uncore_irp_disable_event,
1892 .enable_event = ivbep_uncore_irp_enable_event,
1893 .read_counter = ivbep_uncore_irp_read_counter,
1896 static struct intel_uncore_type ivbep_uncore_irp = {
1900 .perf_ctr_bits = 48,
1901 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1902 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1903 .ops = &ivbep_uncore_irp_ops,
1904 .format_group = &ivbep_uncore_format_group,
1907 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1908 .init_box = ivbep_uncore_pci_init_box,
1909 .disable_box = snbep_uncore_pci_disable_box,
1910 .enable_box = snbep_uncore_pci_enable_box,
1911 .disable_event = snbep_uncore_pci_disable_event,
1912 .enable_event = snbep_qpi_enable_event,
1913 .read_counter = snbep_uncore_pci_read_counter,
1914 .hw_config = snbep_qpi_hw_config,
1915 .get_constraint = uncore_get_constraint,
1916 .put_constraint = uncore_put_constraint,
1919 static struct intel_uncore_type ivbep_uncore_qpi = {
1923 .perf_ctr_bits = 48,
1924 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1925 .event_ctl = SNBEP_PCI_PMON_CTL0,
1926 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1927 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1928 .num_shared_regs = 1,
1929 .ops = &ivbep_uncore_qpi_ops,
1930 .format_group = &ivbep_uncore_qpi_format_group,
1933 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1937 .perf_ctr_bits = 44,
1938 .constraints = snbep_uncore_r2pcie_constraints,
1939 IVBEP_UNCORE_PCI_COMMON_INIT(),
1942 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1946 .perf_ctr_bits = 44,
1947 .constraints = snbep_uncore_r3qpi_constraints,
1948 IVBEP_UNCORE_PCI_COMMON_INIT(),
1952 IVBEP_PCI_UNCORE_HA,
1953 IVBEP_PCI_UNCORE_IMC,
1954 IVBEP_PCI_UNCORE_IRP,
1955 IVBEP_PCI_UNCORE_QPI,
1956 IVBEP_PCI_UNCORE_R2PCIE,
1957 IVBEP_PCI_UNCORE_R3QPI,
1960 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1961 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1962 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1963 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1964 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1965 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1966 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1970 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1971 { /* Home Agent 0 */
1972 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1973 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1975 { /* Home Agent 1 */
1976 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1977 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1979 { /* MC0 Channel 0 */
1980 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1981 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1983 { /* MC0 Channel 1 */
1984 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1985 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1987 { /* MC0 Channel 3 */
1988 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1989 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1991 { /* MC0 Channel 4 */
1992 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1993 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1995 { /* MC1 Channel 0 */
1996 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1997 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1999 { /* MC1 Channel 1 */
2000 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2001 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2003 { /* MC1 Channel 3 */
2004 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2005 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2007 { /* MC1 Channel 4 */
2008 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2009 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2012 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2013 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2016 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2017 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2020 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2021 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2024 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2025 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2028 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2029 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2031 { /* R3QPI0 Link 0 */
2032 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2033 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2035 { /* R3QPI0 Link 1 */
2036 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2037 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2039 { /* R3QPI1 Link 2 */
2040 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2041 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2043 { /* QPI Port 0 filter */
2044 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2045 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2046 SNBEP_PCI_QPI_PORT0_FILTER),
2048 { /* QPI Port 0 filter */
2049 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2050 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2051 SNBEP_PCI_QPI_PORT1_FILTER),
2053 { /* end: all zeroes */ }
2056 static struct pci_driver ivbep_uncore_pci_driver = {
2057 .name = "ivbep_uncore",
2058 .id_table = ivbep_uncore_pci_ids,
2061 int ivbep_uncore_pci_init(void)
2063 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2066 uncore_pci_uncores = ivbep_pci_uncores;
2067 uncore_pci_driver = &ivbep_uncore_pci_driver;
2070 /* end of IvyTown uncore support */
2072 /* KNL uncore support */
2073 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2074 &format_attr_event.attr,
2075 &format_attr_umask.attr,
2076 &format_attr_edge.attr,
2077 &format_attr_tid_en.attr,
2078 &format_attr_inv.attr,
2079 &format_attr_thresh5.attr,
2083 static const struct attribute_group knl_uncore_ubox_format_group = {
2085 .attrs = knl_uncore_ubox_formats_attr,
2088 static struct intel_uncore_type knl_uncore_ubox = {
2092 .perf_ctr_bits = 48,
2093 .fixed_ctr_bits = 48,
2094 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2095 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2096 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2097 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2098 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2099 .ops = &snbep_uncore_msr_ops,
2100 .format_group = &knl_uncore_ubox_format_group,
2103 static struct attribute *knl_uncore_cha_formats_attr[] = {
2104 &format_attr_event.attr,
2105 &format_attr_umask.attr,
2106 &format_attr_qor.attr,
2107 &format_attr_edge.attr,
2108 &format_attr_tid_en.attr,
2109 &format_attr_inv.attr,
2110 &format_attr_thresh8.attr,
2111 &format_attr_filter_tid4.attr,
2112 &format_attr_filter_link3.attr,
2113 &format_attr_filter_state4.attr,
2114 &format_attr_filter_local.attr,
2115 &format_attr_filter_all_op.attr,
2116 &format_attr_filter_nnm.attr,
2117 &format_attr_filter_opc3.attr,
2118 &format_attr_filter_nc.attr,
2119 &format_attr_filter_isoc.attr,
2123 static const struct attribute_group knl_uncore_cha_format_group = {
2125 .attrs = knl_uncore_cha_formats_attr,
2128 static struct event_constraint knl_uncore_cha_constraints[] = {
2129 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2130 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2131 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2132 EVENT_CONSTRAINT_END
2135 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2136 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2137 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2138 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2139 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2140 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2144 static u64 knl_cha_filter_mask(int fields)
2149 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2151 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2153 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2157 static struct event_constraint *
2158 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2160 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2163 static int knl_cha_hw_config(struct intel_uncore_box *box,
2164 struct perf_event *event)
2166 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2167 struct extra_reg *er;
2170 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2171 if (er->event != (event->hw.config & er->config_mask))
2177 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2178 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2179 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2181 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2182 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2183 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2189 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2190 struct perf_event *event);
2192 static struct intel_uncore_ops knl_uncore_cha_ops = {
2193 .init_box = snbep_uncore_msr_init_box,
2194 .disable_box = snbep_uncore_msr_disable_box,
2195 .enable_box = snbep_uncore_msr_enable_box,
2196 .disable_event = snbep_uncore_msr_disable_event,
2197 .enable_event = hswep_cbox_enable_event,
2198 .read_counter = uncore_msr_read_counter,
2199 .hw_config = knl_cha_hw_config,
2200 .get_constraint = knl_cha_get_constraint,
2201 .put_constraint = snbep_cbox_put_constraint,
2204 static struct intel_uncore_type knl_uncore_cha = {
2208 .perf_ctr_bits = 48,
2209 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2210 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2211 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2212 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2213 .msr_offset = KNL_CHA_MSR_OFFSET,
2214 .num_shared_regs = 1,
2215 .constraints = knl_uncore_cha_constraints,
2216 .ops = &knl_uncore_cha_ops,
2217 .format_group = &knl_uncore_cha_format_group,
2220 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2221 &format_attr_event2.attr,
2222 &format_attr_use_occ_ctr.attr,
2223 &format_attr_occ_sel.attr,
2224 &format_attr_edge.attr,
2225 &format_attr_tid_en.attr,
2226 &format_attr_inv.attr,
2227 &format_attr_thresh6.attr,
2228 &format_attr_occ_invert.attr,
2229 &format_attr_occ_edge_det.attr,
2233 static const struct attribute_group knl_uncore_pcu_format_group = {
2235 .attrs = knl_uncore_pcu_formats_attr,
2238 static struct intel_uncore_type knl_uncore_pcu = {
2242 .perf_ctr_bits = 48,
2243 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2244 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2245 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2246 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2247 .ops = &snbep_uncore_msr_ops,
2248 .format_group = &knl_uncore_pcu_format_group,
2251 static struct intel_uncore_type *knl_msr_uncores[] = {
2258 void knl_uncore_cpu_init(void)
2260 uncore_msr_uncores = knl_msr_uncores;
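/* KNL IMC/EDC boxes are enabled by clearing the entire box control register */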
2263 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2265 struct pci_dev *pdev = box->pci_dev;
2266 int box_ctl = uncore_pci_box_ctl(box);
2268 pci_write_config_dword(pdev, box_ctl, 0);
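/* The fixed counter uses KNL_PMON_FIXED_CTL_EN; general-purpose counters keep the SNB-EP enable bit */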
2271 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2272 struct perf_event *event)
2274 struct pci_dev *pdev = box->pci_dev;
2275 struct hw_perf_event *hwc = &event->hw;
2277 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2278 == UNCORE_FIXED_EVENT)
2279 pci_write_config_dword(pdev, hwc->config_base,
2280 hwc->config | KNL_PMON_FIXED_CTL_EN);
2282 pci_write_config_dword(pdev, hwc->config_base,
2283 hwc->config | SNBEP_PMON_CTL_EN);
2286 static struct intel_uncore_ops knl_uncore_imc_ops = {
2287 .init_box = snbep_uncore_pci_init_box,
2288 .disable_box = snbep_uncore_pci_disable_box,
2289 .enable_box = knl_uncore_imc_enable_box,
2290 .read_counter = snbep_uncore_pci_read_counter,
2291 .enable_event = knl_uncore_imc_enable_event,
2292 .disable_event = snbep_uncore_pci_disable_event,
2295 static struct intel_uncore_type knl_uncore_imc_uclk = {
2299 .perf_ctr_bits = 48,
2300 .fixed_ctr_bits = 48,
2301 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2302 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2303 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2304 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2305 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2306 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2307 .ops = &knl_uncore_imc_ops,
2308 .format_group = &snbep_uncore_format_group,
2311 static struct intel_uncore_type knl_uncore_imc_dclk = {
2315 .perf_ctr_bits = 48,
2316 .fixed_ctr_bits = 48,
2317 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2318 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2319 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2320 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2321 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2322 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2323 .ops = &knl_uncore_imc_ops,
2324 .format_group = &snbep_uncore_format_group,
2327 static struct intel_uncore_type knl_uncore_edc_uclk = {
2331 .perf_ctr_bits = 48,
2332 .fixed_ctr_bits = 48,
2333 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2334 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2335 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2336 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2337 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2338 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2339 .ops = &knl_uncore_imc_ops,
2340 .format_group = &snbep_uncore_format_group,
2343 static struct intel_uncore_type knl_uncore_edc_eclk = {
2347 .perf_ctr_bits = 48,
2348 .fixed_ctr_bits = 48,
2349 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2350 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2351 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2352 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2353 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2354 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2355 .ops = &knl_uncore_imc_ops,
2356 .format_group = &snbep_uncore_format_group,
2359 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2360 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2361 EVENT_CONSTRAINT_END
2364 static struct intel_uncore_type knl_uncore_m2pcie = {
2368 .perf_ctr_bits = 48,
2369 .constraints = knl_uncore_m2pcie_constraints,
2370 SNBEP_UNCORE_PCI_COMMON_INIT(),
2373 static struct attribute *knl_uncore_irp_formats_attr[] = {
2374 &format_attr_event.attr,
2375 &format_attr_umask.attr,
2376 &format_attr_qor.attr,
2377 &format_attr_edge.attr,
2378 &format_attr_inv.attr,
2379 &format_attr_thresh8.attr,
2383 static const struct attribute_group knl_uncore_irp_format_group = {
2385 .attrs = knl_uncore_irp_formats_attr,
2388 static struct intel_uncore_type knl_uncore_irp = {
2392 .perf_ctr_bits = 48,
2393 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2394 .event_ctl = SNBEP_PCI_PMON_CTL0,
2395 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2396 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2397 .ops = &snbep_uncore_pci_ops,
2398 .format_group = &knl_uncore_irp_format_group,
2402 KNL_PCI_UNCORE_MC_UCLK,
2403 KNL_PCI_UNCORE_MC_DCLK,
2404 KNL_PCI_UNCORE_EDC_UCLK,
2405 KNL_PCI_UNCORE_EDC_ECLK,
2406 KNL_PCI_UNCORE_M2PCIE,
2410 static struct intel_uncore_type *knl_pci_uncores[] = {
2411 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2412 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2413 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2414 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2415 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2416 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2421 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2422 * device type. Prior to KNL, each instance of a PMU device type had a unique device ID.
2425 * PCI Device ID Uncore PMU Devices
2426 * ----------------------------------
2427 * 0x7841 MC0 UClk, MC1 UClk
2428 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2429 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2430 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2431 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2432 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2433 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
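 * 0x7817 M2PCIe
 * 0x7814 IRP
 */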
2438 static const struct pci_device_id knl_uncore_pci_ids[] = {
2440 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2441 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2444 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2445 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2447 { /* MC0 DClk CH 0 */
2448 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2449 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2451 { /* MC0 DClk CH 1 */
2452 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2453 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2455 { /* MC0 DClk CH 2 */
2456 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2457 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2459 { /* MC1 DClk CH 0 */
2460 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2461 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2463 { /* MC1 DClk CH 1 */
2464 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2465 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2467 { /* MC1 DClk CH 2 */
2468 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2469 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2472 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2473 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2476 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2477 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2480 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2481 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2484 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2485 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2488 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2489 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2492 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2493 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2496 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2497 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2500 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2501 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2504 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2505 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2508 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2509 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2512 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2513 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2516 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2517 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2520 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2521 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2524 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2525 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2528 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2529 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2532 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2533 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2536 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2537 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2540 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2541 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2543 { /* end: all zeroes */ }
2546 static struct pci_driver knl_uncore_pci_driver = {
2547 .name = "knl_uncore",
2548 .id_table = knl_uncore_pci_ids,
2551 int knl_uncore_pci_init(void)
2555 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2556 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2559 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2562 uncore_pci_uncores = knl_pci_uncores;
2563 uncore_pci_driver = &knl_uncore_pci_driver;
2567 /* end of KNL uncore support */
2569 /* Haswell-EP uncore support */
2570 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2571 &format_attr_event.attr,
2572 &format_attr_umask.attr,
2573 &format_attr_edge.attr,
2574 &format_attr_inv.attr,
2575 &format_attr_thresh5.attr,
2576 &format_attr_filter_tid2.attr,
2577 &format_attr_filter_cid.attr,
2581 static const struct attribute_group hswep_uncore_ubox_format_group = {
2583 .attrs = hswep_uncore_ubox_formats_attr,
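/* All Ubox counters share one filter MSR; record it in the event's extra_reg */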
2586 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2588 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2589 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2590 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2595 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2596 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2597 .hw_config = hswep_ubox_hw_config,
2598 .get_constraint = uncore_get_constraint,
2599 .put_constraint = uncore_put_constraint,
2602 static struct intel_uncore_type hswep_uncore_ubox = {
2606 .perf_ctr_bits = 44,
2607 .fixed_ctr_bits = 48,
2608 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2609 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2610 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2611 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2612 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2613 .num_shared_regs = 1,
2614 .ops = &hswep_uncore_ubox_ops,
2615 .format_group = &hswep_uncore_ubox_format_group,
2618 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2619 &format_attr_event.attr,
2620 &format_attr_umask.attr,
2621 &format_attr_edge.attr,
2622 &format_attr_tid_en.attr,
2623 &format_attr_thresh8.attr,
2624 &format_attr_filter_tid3.attr,
2625 &format_attr_filter_link2.attr,
2626 &format_attr_filter_state3.attr,
2627 &format_attr_filter_nid2.attr,
2628 &format_attr_filter_opc2.attr,
2629 &format_attr_filter_nc.attr,
2630 &format_attr_filter_c6.attr,
2631 &format_attr_filter_isoc.attr,
2635 static const struct attribute_group hswep_uncore_cbox_format_group = {
2637 .attrs = hswep_uncore_cbox_formats_attr,
2640 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2641 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2642 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2643 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2644 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2645 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2646 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2647 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2648 EVENT_CONSTRAINT_END
2651 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2652 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2653 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2654 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2655 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2656 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2657 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2658 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2659 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2660 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2661 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2662 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2663 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2664 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2665 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2666 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2667 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2668 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2669 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2670 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2671 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2672 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2673 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2674 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2675 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2676 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2677 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2678 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2679 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2680 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2681 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2682 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2683 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2684 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2685 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2686 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2687 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2688 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2689 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
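/*
 * Same scheme as the IVB-EP Cbox: map the requested filter fields onto the
 * HSW-EP filter-register layout.
 */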
2693 static u64 hswep_cbox_filter_mask(int fields)
2697 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2699 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2701 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2703 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2704 if (fields & 0x10) {
2705 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2706 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2707 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2708 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2713 static struct event_constraint *
2714 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2716 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2719 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2721 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2722 struct extra_reg *er;
2725 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2726 if (er->event != (event->hw.config & er->config_mask))
2732 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2733 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2734 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
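/* Unlike IVB-EP, the two HSW-EP Cbox filter MSRs are adjacent (reg and reg + 1) */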
2740 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2741 struct perf_event *event)
2743 struct hw_perf_event *hwc = &event->hw;
2744 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2746 if (reg1->idx != EXTRA_REG_NONE) {
2747 u64 filter = uncore_shared_reg_config(box, 0);
2748 wrmsrl(reg1->reg, filter & 0xffffffff);
2749 wrmsrl(reg1->reg + 1, filter >> 32);
2752 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2755 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2756 .init_box = snbep_uncore_msr_init_box,
2757 .disable_box = snbep_uncore_msr_disable_box,
2758 .enable_box = snbep_uncore_msr_enable_box,
2759 .disable_event = snbep_uncore_msr_disable_event,
2760 .enable_event = hswep_cbox_enable_event,
2761 .read_counter = uncore_msr_read_counter,
2762 .hw_config = hswep_cbox_hw_config,
2763 .get_constraint = hswep_cbox_get_constraint,
2764 .put_constraint = snbep_cbox_put_constraint,
2767 static struct intel_uncore_type hswep_uncore_cbox = {
2771 .perf_ctr_bits = 48,
2772 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2773 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2774 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2775 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2776 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2777 .num_shared_regs = 1,
2778 .constraints = hswep_uncore_cbox_constraints,
2779 .ops = &hswep_uncore_cbox_ops,
2780 .format_group = &hswep_uncore_cbox_format_group,
2784 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2786 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2788 unsigned msr = uncore_msr_box_ctl(box);
2791 u64 init = SNBEP_PMON_BOX_CTL_INT;
2795 for_each_set_bit(i, (unsigned long *)&init, 64) {
2796 flags |= (1ULL << i);
2802 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2803 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2804 .init_box = hswep_uncore_sbox_msr_init_box
2807 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2808 &format_attr_event.attr,
2809 &format_attr_umask.attr,
2810 &format_attr_edge.attr,
2811 &format_attr_tid_en.attr,
2812 &format_attr_inv.attr,
2813 &format_attr_thresh8.attr,
2817 static const struct attribute_group hswep_uncore_sbox_format_group = {
2819 .attrs = hswep_uncore_sbox_formats_attr,
2822 static struct intel_uncore_type hswep_uncore_sbox = {
2826 .perf_ctr_bits = 44,
2827 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2828 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2829 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2830 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2831 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2832 .ops = &hswep_uncore_sbox_msr_ops,
2833 .format_group = &hswep_uncore_sbox_format_group,
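/* PCU events 0xb-0xe take a band filter from config1, programmed into the PCU filter MSR */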
2836 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2838 struct hw_perf_event *hwc = &event->hw;
2839 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2840 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2842 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2843 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2844 reg1->idx = ev_sel - 0xb;
2845 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2850 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2851 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2852 .hw_config = hswep_pcu_hw_config,
2853 .get_constraint = snbep_pcu_get_constraint,
2854 .put_constraint = snbep_pcu_put_constraint,
2857 static struct intel_uncore_type hswep_uncore_pcu = {
2861 .perf_ctr_bits = 48,
2862 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2863 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2864 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2865 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2866 .num_shared_regs = 1,
2867 .ops = &hswep_uncore_pcu_ops,
2868 .format_group = &snbep_uncore_pcu_format_group,
2871 static struct intel_uncore_type *hswep_msr_uncores[] = {
2879 #define HSWEP_PCU_DID 0x2fc0
2880 #define HSWEP_PCU_CAPID4_OFFET 0x94
2881 #define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
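/* A zero "chop" field in the PCU's CAPID4 register identifies parts with a reduced SBOX population */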
2883 static bool hswep_has_limit_sbox(unsigned int device)
2885 struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2891 pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2892 if (!hswep_get_chop(capid4))
2898 void hswep_uncore_cpu_init(void)
2900 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2901 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2903 /* Detect 6-8 core systems with only two SBOXes */
2904 if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2905 hswep_uncore_sbox.num_boxes = 2;
2907 uncore_msr_uncores = hswep_msr_uncores;
2910 static struct intel_uncore_type hswep_uncore_ha = {
2914 .perf_ctr_bits = 48,
2915 SNBEP_UNCORE_PCI_COMMON_INIT(),
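/*
 * cas_count_read/write are scaled to MiB: each CAS moves one 64-byte cache
 * line, and 64 / 2^20 = 6.103515625e-5.
 */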
2918 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2919 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2920 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2921 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2922 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2923 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2924 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2925 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2926 { /* end: all zeroes */ },
2929 static struct intel_uncore_type hswep_uncore_imc = {
2933 .perf_ctr_bits = 48,
2934 .fixed_ctr_bits = 48,
2935 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2936 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2937 .event_descs = hswep_uncore_imc_events,
2938 SNBEP_UNCORE_PCI_COMMON_INIT(),
2941 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2943 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2945 struct pci_dev *pdev = box->pci_dev;
2946 struct hw_perf_event *hwc = &event->hw;
2949 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2950 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2955 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2956 .init_box = snbep_uncore_pci_init_box,
2957 .disable_box = snbep_uncore_pci_disable_box,
2958 .enable_box = snbep_uncore_pci_enable_box,
2959 .disable_event = ivbep_uncore_irp_disable_event,
2960 .enable_event = ivbep_uncore_irp_enable_event,
2961 .read_counter = hswep_uncore_irp_read_counter,
2964 static struct intel_uncore_type hswep_uncore_irp = {
2968 .perf_ctr_bits = 48,
2969 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2970 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2971 .ops = &hswep_uncore_irp_ops,
2972 .format_group = &snbep_uncore_format_group,
2975 static struct intel_uncore_type hswep_uncore_qpi = {
2979 .perf_ctr_bits = 48,
2980 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2981 .event_ctl = SNBEP_PCI_PMON_CTL0,
2982 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2983 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2984 .num_shared_regs = 1,
2985 .ops = &snbep_uncore_qpi_ops,
2986 .format_group = &snbep_uncore_qpi_format_group,
2989 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2990 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2991 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2992 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2993 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2994 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2995 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2996 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2997 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2998 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2999 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3000 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3001 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3002 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3003 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3004 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3005 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3006 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3007 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3008 EVENT_CONSTRAINT_END
3011 static struct intel_uncore_type hswep_uncore_r2pcie = {
3015 .perf_ctr_bits = 48,
3016 .constraints = hswep_uncore_r2pcie_constraints,
3017 SNBEP_UNCORE_PCI_COMMON_INIT(),
3020 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3021 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3022 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3023 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3024 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3025 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3026 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3027 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3028 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3029 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3030 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3031 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3032 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3033 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3034 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3035 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3036 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3037 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3038 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3039 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3040 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3041 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3042 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3043 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3044 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3045 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3046 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3047 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3048 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3049 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3050 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3051 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3052 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3053 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3054 EVENT_CONSTRAINT_END
3057 static struct intel_uncore_type hswep_uncore_r3qpi = {
3061 .perf_ctr_bits = 44,
3062 .constraints = hswep_uncore_r3qpi_constraints,
3063 SNBEP_UNCORE_PCI_COMMON_INIT(),
3067 HSWEP_PCI_UNCORE_HA,
3068 HSWEP_PCI_UNCORE_IMC,
3069 HSWEP_PCI_UNCORE_IRP,
3070 HSWEP_PCI_UNCORE_QPI,
3071 HSWEP_PCI_UNCORE_R2PCIE,
3072 HSWEP_PCI_UNCORE_R3QPI,
3075 static struct intel_uncore_type *hswep_pci_uncores[] = {
3076 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
3077 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
3078 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
3079 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
3080 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
3081 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
3085 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3086 { /* Home Agent 0 */
3087 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3088 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3090 { /* Home Agent 1 */
3091 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3092 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3094 { /* MC0 Channel 0 */
3095 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3096 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3098 { /* MC0 Channel 1 */
3099 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3100 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3102 { /* MC0 Channel 2 */
3103 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3104 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3106 { /* MC0 Channel 3 */
3107 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3108 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3110 { /* MC1 Channel 0 */
3111 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3112 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3114 { /* MC1 Channel 1 */
3115 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3116 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3118 { /* MC1 Channel 2 */
3119 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3120 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3122 { /* MC1 Channel 3 */
3123 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3124 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3127 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3128 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3131 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3132 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3135 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3136 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3139 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3140 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3143 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3144 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3146 { /* R3QPI0 Link 0 */
3147 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3148 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3150 { /* R3QPI0 Link 1 */
3151 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3152 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3154 { /* R3QPI1 Link 2 */
3155 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3156 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3158 { /* QPI Port 0 filter */
3159 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3160 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3161 SNBEP_PCI_QPI_PORT0_FILTER),
3163 { /* QPI Port 1 filter */
3164 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3165 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3166 SNBEP_PCI_QPI_PORT1_FILTER),
3168 { /* end: all zeroes */ }
3171 static struct pci_driver hswep_uncore_pci_driver = {
3172 .name = "hswep_uncore",
3173 .id_table = hswep_uncore_pci_ids,
3176 int hswep_uncore_pci_init(void)
3178 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3181 uncore_pci_uncores = hswep_pci_uncores;
3182 uncore_pci_driver = &hswep_uncore_pci_driver;
3185 /* end of Haswell-EP uncore support */
3187 /* BDX uncore support */
3189 static struct intel_uncore_type bdx_uncore_ubox = {
3193 .perf_ctr_bits = 48,
3194 .fixed_ctr_bits = 48,
3195 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3196 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3197 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3198 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3199 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3200 .num_shared_regs = 1,
3201 .ops = &ivbep_uncore_msr_ops,
3202 .format_group = &ivbep_uncore_ubox_format_group,
3205 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3206 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3207 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3208 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3209 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3210 EVENT_CONSTRAINT_END
3213 static struct intel_uncore_type bdx_uncore_cbox = {
3217 .perf_ctr_bits = 48,
3218 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3219 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3220 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3221 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3222 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3223 .num_shared_regs = 1,
3224 .constraints = bdx_uncore_cbox_constraints,
3225 .ops = &hswep_uncore_cbox_ops,
3226 .format_group = &hswep_uncore_cbox_format_group,
3229 static struct intel_uncore_type bdx_uncore_sbox = {
3233 .perf_ctr_bits = 48,
3234 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3235 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3236 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3237 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3238 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3239 .ops = &hswep_uncore_sbox_msr_ops,
3240 .format_group = &hswep_uncore_sbox_format_group,
3243 #define BDX_MSR_UNCORE_SBOX 3
3245 static struct intel_uncore_type *bdx_msr_uncores[] = {
3253 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3254 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3255 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3256 EVENT_CONSTRAINT_END
3259 #define BDX_PCU_DID 0x6fc0
3261 void bdx_uncore_cpu_init(void)
3263 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3264 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3265 uncore_msr_uncores = bdx_msr_uncores;
3267 /* Detect systems with no SBOXes: Broadwell-DE (model 86) or parts whose CAPID4 chop field is zero */
3268 if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3269 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3271 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3274 static struct intel_uncore_type bdx_uncore_ha = {
3278 .perf_ctr_bits = 48,
3279 SNBEP_UNCORE_PCI_COMMON_INIT(),
3282 static struct intel_uncore_type bdx_uncore_imc = {
3286 .perf_ctr_bits = 48,
3287 .fixed_ctr_bits = 48,
3288 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3289 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3290 .event_descs = hswep_uncore_imc_events,
3291 SNBEP_UNCORE_PCI_COMMON_INIT(),
3294 static struct intel_uncore_type bdx_uncore_irp = {
3298 .perf_ctr_bits = 48,
3299 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3300 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3301 .ops = &hswep_uncore_irp_ops,
3302 .format_group = &snbep_uncore_format_group,
3305 static struct intel_uncore_type bdx_uncore_qpi = {
3309 .perf_ctr_bits = 48,
3310 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3311 .event_ctl = SNBEP_PCI_PMON_CTL0,
3312 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3313 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3314 .num_shared_regs = 1,
3315 .ops = &snbep_uncore_qpi_ops,
3316 .format_group = &snbep_uncore_qpi_format_group,
3319 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3320 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3321 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3322 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3323 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3324 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3325 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3326 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3327 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3328 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3329 EVENT_CONSTRAINT_END
3332 static struct intel_uncore_type bdx_uncore_r2pcie = {
3336 .perf_ctr_bits = 48,
3337 .constraints = bdx_uncore_r2pcie_constraints,
3338 SNBEP_UNCORE_PCI_COMMON_INIT(),
3341 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3342 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3343 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3344 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3345 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3346 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3347 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3348 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3349 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3350 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3351 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3352 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3353 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3354 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3355 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3356 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3357 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3358 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3359 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3360 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3361 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3362 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3363 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3364 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3365 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3366 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3367 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3368 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3369 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3370 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3371 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3372 EVENT_CONSTRAINT_END
3375 static struct intel_uncore_type bdx_uncore_r3qpi = {
3379 .perf_ctr_bits = 48,
3380 .constraints = bdx_uncore_r3qpi_constraints,
3381 SNBEP_UNCORE_PCI_COMMON_INIT(),
3389 BDX_PCI_UNCORE_R2PCIE,
3390 BDX_PCI_UNCORE_R3QPI,
3393 static struct intel_uncore_type *bdx_pci_uncores[] = {
3394 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3395 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3396 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3397 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3398 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3399 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3403 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3404 { /* Home Agent 0 */
3405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3406 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3408 { /* Home Agent 1 */
3409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3410 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3412 { /* MC0 Channel 0 */
3413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3414 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3416 { /* MC0 Channel 1 */
3417 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3418 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3420 { /* MC0 Channel 2 */
3421 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3422 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3424 { /* MC0 Channel 3 */
3425 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3426 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3428 { /* MC1 Channel 0 */
3429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3430 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3432 { /* MC1 Channel 1 */
3433 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3434 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3436 { /* MC1 Channel 2 */
3437 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3438 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3440 { /* MC1 Channel 3 */
3441 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3442 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3445 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3446 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3449 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3450 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3453 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3454 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3457 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3458 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3461 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3462 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3464 { /* R3QPI0 Link 0 */
3465 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3466 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3468 { /* R3QPI0 Link 1 */
3469 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3470 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3472 { /* R3QPI1 Link 2 */
3473 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3474 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3476 { /* QPI Port 0 filter */
3477 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3478 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3479 SNBEP_PCI_QPI_PORT0_FILTER),
3481 { /* QPI Port 1 filter */
3482 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3483 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3484 SNBEP_PCI_QPI_PORT1_FILTER),
3486 { /* QPI Port 2 filter */
3487 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3488 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3489 BDX_PCI_QPI_PORT2_FILTER),
3491 { /* end: all zeroes */ }
3494 static struct pci_driver bdx_uncore_pci_driver = {
3495 .name = "bdx_uncore",
3496 .id_table = bdx_uncore_pci_ids,
3499 int bdx_uncore_pci_init(void)
3501 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3505 uncore_pci_uncores = bdx_pci_uncores;
3506 uncore_pci_driver = &bdx_uncore_pci_driver;
3510 /* end of BDX uncore support */
3512 /* SKX uncore support */
3514 static struct intel_uncore_type skx_uncore_ubox = {
3518 .perf_ctr_bits = 48,
3519 .fixed_ctr_bits = 48,
3520 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3521 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3522 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3523 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3524 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3525 .ops = &ivbep_uncore_msr_ops,
3526 .format_group = &ivbep_uncore_ubox_format_group,
3529 static struct attribute *skx_uncore_cha_formats_attr[] = {
3530 &format_attr_event.attr,
3531 &format_attr_umask.attr,
3532 &format_attr_edge.attr,
3533 &format_attr_tid_en.attr,
3534 &format_attr_inv.attr,
3535 &format_attr_thresh8.attr,
3536 &format_attr_filter_tid4.attr,
3537 &format_attr_filter_state5.attr,
3538 &format_attr_filter_rem.attr,
3539 &format_attr_filter_loc.attr,
3540 &format_attr_filter_nm.attr,
3541 &format_attr_filter_all_op.attr,
3542 &format_attr_filter_not_nm.attr,
3543 &format_attr_filter_opc_0.attr,
3544 &format_attr_filter_opc_1.attr,
3545 &format_attr_filter_nc.attr,
3546 &format_attr_filter_isoc.attr,
3550 static const struct attribute_group skx_uncore_chabox_format_group = {
3552 .attrs = skx_uncore_cha_formats_attr,
3555 static struct event_constraint skx_uncore_chabox_constraints[] = {
3556 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3557 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3558 EVENT_CONSTRAINT_END
3561 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3562 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3563 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3564 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3565 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3566 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3567 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3568 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3569 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3570 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
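/*
 * Build the SKX CHA filter-register mask for the requested fields; the
 * locality/opcode filters (REM/LOC/ALL_OPC/NM/NOT_NM/OPC0/OPC1/NC/ISOC) are
 * enabled as a group.
 */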
3574 static u64 skx_cha_filter_mask(int fields)
3579 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3583 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3585 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3586 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3587 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3588 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3589 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3590 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3591 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3592 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3593 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3598 static struct event_constraint *
3599 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3601 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3604 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3606 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3607 struct extra_reg *er;
3610 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3611 if (er->event != (event->hw.config & er->config_mask))
3617 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3618 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3619 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3625 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3626 /* The chabox ctl register has no frz_en bit; use the IVB-EP style init, which does not set it */
3627 .init_box = ivbep_uncore_msr_init_box,
3628 .disable_box = snbep_uncore_msr_disable_box,
3629 .enable_box = snbep_uncore_msr_enable_box,
3630 .disable_event = snbep_uncore_msr_disable_event,
3631 .enable_event = hswep_cbox_enable_event,
3632 .read_counter = uncore_msr_read_counter,
3633 .hw_config = skx_cha_hw_config,
3634 .get_constraint = skx_cha_get_constraint,
3635 .put_constraint = snbep_cbox_put_constraint,
3638 static struct intel_uncore_type skx_uncore_chabox = {
3641 .perf_ctr_bits = 48,
3642 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3643 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3644 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3645 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3646 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3647 .num_shared_regs = 1,
3648 .constraints = skx_uncore_chabox_constraints,
3649 .ops = &skx_uncore_chabox_ops,
3650 .format_group = &skx_uncore_chabox_format_group,
3653 static struct attribute *skx_uncore_iio_formats_attr[] = {
3654 &format_attr_event.attr,
3655 &format_attr_umask.attr,
3656 &format_attr_edge.attr,
3657 &format_attr_inv.attr,
3658 &format_attr_thresh9.attr,
3659 &format_attr_ch_mask.attr,
3660 &format_attr_fc_mask.attr,
3664 static const struct attribute_group skx_uncore_iio_format_group = {
3666 .attrs = skx_uncore_iio_formats_attr,
3669 static struct event_constraint skx_uncore_iio_constraints[] = {
3670 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3671 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3672 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3673 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3674 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3675 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3676 EVENT_CONSTRAINT_END
3679 static void skx_iio_enable_event(struct intel_uncore_box *box,
3680 struct perf_event *event)
3682 struct hw_perf_event *hwc = &event->hw;
3684 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3687 static struct intel_uncore_ops skx_uncore_iio_ops = {
3688 .init_box = ivbep_uncore_msr_init_box,
3689 .disable_box = snbep_uncore_msr_disable_box,
3690 .enable_box = snbep_uncore_msr_enable_box,
3691 .disable_event = snbep_uncore_msr_disable_event,
3692 .enable_event = skx_iio_enable_event,
3693 .read_counter = uncore_msr_read_counter,
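/* Each IIO PMU instance owns BUS_NUM_STRIDE bits of the per-die bus-number word read from SKX_MSR_CPU_BUS_NUMBER */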
3696 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3698 return pmu->type->topology[die].configuration >>
3699 (pmu->pmu_idx * BUS_NUM_STRIDE);
3703 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3704 int die, int zero_bus_pmu)
3706 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3708 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3712 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3714 /* Root bus 0x00 is valid only for pmu_idx = 0. */
3715 return pmu_iio_mapping_visible(kobj, attr, die, 0);
3718 static ssize_t skx_iio_mapping_show(struct device *dev,
3719 struct device_attribute *attr, char *buf)
3721 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3722 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3723 long die = (long)ea->var;
3725 return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3726 skx_iio_stack(pmu, die));
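/*
 * The mapping attribute therefore reads as "<segment>:<root bus>", for
 * example "0000:3a" (illustrative value) for the stack handled by this PMU.
 */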
3729 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3733 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3734 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3737 *topology = msr_value;
3742 static int die_to_cpu(int die)
3744 int res = 0, cpu, current_die;
3746 * Use cpus_read_lock() to ensure a CPU cannot go offline while we are
3747 * walking cpu_online_mask.
3750 for_each_online_cpu(cpu) {
3751 current_die = topology_logical_die_id(cpu);
3752 if (current_die == die) {
3761 static int skx_iio_get_topology(struct intel_uncore_type *type)
3763 int die, ret = -EPERM;
3765 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3767 if (!type->topology)
3770 for (die = 0; die < uncore_max_dies(); die++) {
3771 ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3772 &type->topology[die].configuration);
3776 ret = uncore_die_to_segment(die);
3780 type->topology[die].segment = ret;
3784 kfree(type->topology);
3785 type->topology = NULL;
3791 static struct attribute_group skx_iio_mapping_group = {
3792 .is_visible = skx_iio_mapping_visible,
3795 static const struct attribute_group *skx_iio_attr_update[] = {
3796 &skx_iio_mapping_group,
3801 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3806 struct attribute **attrs = NULL;
3807 struct dev_ext_attribute *eas = NULL;
3809 ret = type->get_topology(type);
3811 goto clear_attr_update;
3815 /* One more for NULL. */
3816 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3820 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3824 for (die = 0; die < uncore_max_dies(); die++) {
3825 sprintf(buf, "die%ld", die);
3826 sysfs_attr_init(&eas[die].attr.attr);
3827 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3828 if (!eas[die].attr.attr.name)
3830 eas[die].attr.attr.mode = 0444;
3831 eas[die].attr.show = skx_iio_mapping_show;
3832 eas[die].attr.store = NULL;
3833 eas[die].var = (void *)die;
3834 attrs[die] = &eas[die].attr.attr;
3840 for (; die >= 0; die--)
3841 kfree(eas[die].attr.attr.name);
3844 kfree(type->topology);
3846 type->attr_update = NULL;
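/*
 * Summary: pmu_iio_set_mapping() allocates one read-only "die<N>" attribute
 * per die, wired to skx_iio_mapping_show(), and publishes it through the
 * supplied attribute group. Assuming the usual sysfs layout, the mapping can
 * then be read with something like:
 *
 *   cat /sys/devices/uncore_iio_0/die0
 *   0000:00
 *
 * (path and value shown for illustration only).
 */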
3850 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3852 return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
3855 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3857 struct attribute **attr = skx_iio_mapping_group.attrs;
3862 for (; *attr; attr++)
3863 kfree((*attr)->name);
3864 kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3865 kfree(skx_iio_mapping_group.attrs);
3866 skx_iio_mapping_group.attrs = NULL;
3867 kfree(type->topology);
3870 static struct intel_uncore_type skx_uncore_iio = {
3874 .perf_ctr_bits = 48,
3875 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3876 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
3877 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
3878 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3879 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
3880 .msr_offset = SKX_IIO_MSR_OFFSET,
3881 .constraints = skx_uncore_iio_constraints,
3882 .ops = &skx_uncore_iio_ops,
3883 .format_group = &skx_uncore_iio_format_group,
3884 .attr_update = skx_iio_attr_update,
3885 .get_topology = skx_iio_get_topology,
3886 .set_mapping = skx_iio_set_mapping,
3887 .cleanup_mapping = skx_iio_cleanup_mapping,
3890 enum perf_uncore_iio_freerunning_type_id {
3891 SKX_IIO_MSR_IOCLK = 0,
3893 SKX_IIO_MSR_UTIL = 2,
3895 SKX_IIO_FREERUNNING_TYPE_MAX,
3899 static struct freerunning_counters skx_iio_freerunning[] = {
3900 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3901 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3902 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
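/*
 * Each freerunning_counters entry above is, in order: counter base MSR,
 * per-counter offset, per-box offset, number of counters, and counter width
 * in bits (field order assumed from struct freerunning_counters).
 */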
3905 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3906 /* Free-Running IO CLOCKS Counter */
3907 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
3908 /* Free-Running IIO BANDWIDTH Counters */
3909 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
3910 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
3911 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
3912 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
3913 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
3914 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
3915 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
3916 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
3917 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
3918 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
3919 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
3920 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
3921 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
3922 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
3923 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
3924 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
3925 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
3926 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
3927 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
3928 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
3929 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
3930 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
3931 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
3932 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
3933 /* Free-running IIO UTILIZATION Counters */
3934 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
3935 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
3936 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
3937 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
3938 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
3939 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
3940 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
3941 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
3942 { /* end: all zeroes */ },
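/*
 * The bandwidth counters count 4-byte units; the scale strings convert that
 * to MiB: 4 / 2^20 = 3.814697266e-6. As a rough usage sketch, a port counter
 * can be read with something like:
 *
 *   perf stat -a -e uncore_iio_free_running_0/bw_in_port0/ -- sleep 1
 *
 * (the exact PMU name depends on the enumerated box index).
 */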
3945 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3946 .read_counter = uncore_msr_read_counter,
3947 .hw_config = uncore_freerunning_hw_config,
3950 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3951 &format_attr_event.attr,
3952 &format_attr_umask.attr,
3956 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3958 .attrs = skx_uncore_iio_freerunning_formats_attr,
3961 static struct intel_uncore_type skx_uncore_iio_free_running = {
3962 .name = "iio_free_running",
3965 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
3966 .freerunning = skx_iio_freerunning,
3967 .ops = &skx_uncore_iio_freerunning_ops,
3968 .event_descs = skx_uncore_iio_freerunning_events,
3969 .format_group = &skx_uncore_iio_freerunning_format_group,
3972 static struct attribute *skx_uncore_formats_attr[] = {
3973 &format_attr_event.attr,
3974 &format_attr_umask.attr,
3975 &format_attr_edge.attr,
3976 &format_attr_inv.attr,
3977 &format_attr_thresh8.attr,
3981 static const struct attribute_group skx_uncore_format_group = {
3983 .attrs = skx_uncore_formats_attr,
3986 static struct intel_uncore_type skx_uncore_irp = {
3990 .perf_ctr_bits = 48,
3991 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3992 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
3993 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3994 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
3995 .msr_offset = SKX_IRP_MSR_OFFSET,
3996 .ops = &skx_uncore_iio_ops,
3997 .format_group = &skx_uncore_format_group,
4000 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4001 &format_attr_event.attr,
4002 &format_attr_umask.attr,
4003 &format_attr_edge.attr,
4004 &format_attr_inv.attr,
4005 &format_attr_thresh8.attr,
4006 &format_attr_occ_invert.attr,
4007 &format_attr_occ_edge_det.attr,
4008 &format_attr_filter_band0.attr,
4009 &format_attr_filter_band1.attr,
4010 &format_attr_filter_band2.attr,
4011 &format_attr_filter_band3.attr,
4015 static struct attribute_group skx_uncore_pcu_format_group = {
4017 .attrs = skx_uncore_pcu_formats_attr,
4020 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4021 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4022 .hw_config = hswep_pcu_hw_config,
4023 .get_constraint = snbep_pcu_get_constraint,
4024 .put_constraint = snbep_pcu_put_constraint,
4027 static struct intel_uncore_type skx_uncore_pcu = {
4031 .perf_ctr_bits = 48,
4032 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
4033 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
4034 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4035 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
4036 .num_shared_regs = 1,
4037 .ops = &skx_uncore_pcu_ops,
4038 .format_group = &skx_uncore_pcu_format_group,
4041 static struct intel_uncore_type *skx_msr_uncores[] = {
4045 &skx_uncore_iio_free_running,
4052 * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4053 * which is located at Device 30, Function 3, Offset 0x9C (PCI device ID 0x2083).
4055 #define SKX_CAPID6 0x9c
4056 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
4058 static int skx_count_chabox(void)
4060 struct pci_dev *dev = NULL;
4063 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4067 pci_read_config_dword(dev, SKX_CAPID6, &val);
4068 val &= SKX_CHA_BIT_MASK;
4071 return hweight32(val);
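/*
 * Example (illustrative): a part with all 28 CHA slices enabled reports
 * CAPID6[27:0] = 0x0fffffff, so hweight32() returns 28 boxes.
 */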
4074 void skx_uncore_cpu_init(void)
4076 skx_uncore_chabox.num_boxes = skx_count_chabox();
4077 uncore_msr_uncores = skx_msr_uncores;
4080 static struct intel_uncore_type skx_uncore_imc = {
4084 .perf_ctr_bits = 48,
4085 .fixed_ctr_bits = 48,
4086 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4087 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4088 .event_descs = hswep_uncore_imc_events,
4089 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4090 .event_ctl = SNBEP_PCI_PMON_CTL0,
4091 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4092 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4093 .ops = &ivbep_uncore_pci_ops,
4094 .format_group = &skx_uncore_format_group,
4097 static struct attribute *skx_upi_uncore_formats_attr[] = {
4098 &format_attr_event.attr,
4099 &format_attr_umask_ext.attr,
4100 &format_attr_edge.attr,
4101 &format_attr_inv.attr,
4102 &format_attr_thresh8.attr,
4106 static const struct attribute_group skx_upi_uncore_format_group = {
4108 .attrs = skx_upi_uncore_formats_attr,
4111 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4113 struct pci_dev *pdev = box->pci_dev;
4115 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4116 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4119 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4120 .init_box = skx_upi_uncore_pci_init_box,
4121 .disable_box = snbep_uncore_pci_disable_box,
4122 .enable_box = snbep_uncore_pci_enable_box,
4123 .disable_event = snbep_uncore_pci_disable_event,
4124 .enable_event = snbep_uncore_pci_enable_event,
4125 .read_counter = snbep_uncore_pci_read_counter,
4128 static struct intel_uncore_type skx_uncore_upi = {
4132 .perf_ctr_bits = 48,
4133 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
4134 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
4135 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4136 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4137 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
4138 .ops = &skx_upi_uncore_pci_ops,
4139 .format_group = &skx_upi_uncore_format_group,
4142 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4144 struct pci_dev *pdev = box->pci_dev;
4146 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4147 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4150 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4151 .init_box = skx_m2m_uncore_pci_init_box,
4152 .disable_box = snbep_uncore_pci_disable_box,
4153 .enable_box = snbep_uncore_pci_enable_box,
4154 .disable_event = snbep_uncore_pci_disable_event,
4155 .enable_event = snbep_uncore_pci_enable_event,
4156 .read_counter = snbep_uncore_pci_read_counter,
4159 static struct intel_uncore_type skx_uncore_m2m = {
4163 .perf_ctr_bits = 48,
4164 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
4165 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
4166 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4167 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
4168 .ops = &skx_m2m_uncore_pci_ops,
4169 .format_group = &skx_uncore_format_group,
4172 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4173 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4174 EVENT_CONSTRAINT_END
4177 static struct intel_uncore_type skx_uncore_m2pcie = {
4181 .perf_ctr_bits = 48,
4182 .constraints = skx_uncore_m2pcie_constraints,
4183 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4184 .event_ctl = SNBEP_PCI_PMON_CTL0,
4185 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4186 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4187 .ops = &ivbep_uncore_pci_ops,
4188 .format_group = &skx_uncore_format_group,
4191 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4192 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4193 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4194 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4195 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4196 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4197 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4198 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4199 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4200 EVENT_CONSTRAINT_END
4203 static struct intel_uncore_type skx_uncore_m3upi = {
4207 .perf_ctr_bits = 48,
4208 .constraints = skx_uncore_m3upi_constraints,
4209 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4210 .event_ctl = SNBEP_PCI_PMON_CTL0,
4211 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4212 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4213 .ops = &ivbep_uncore_pci_ops,
4214 .format_group = &skx_uncore_format_group,
4221 SKX_PCI_UNCORE_M2PCIE,
4222 SKX_PCI_UNCORE_M3UPI,
4225 static struct intel_uncore_type *skx_pci_uncores[] = {
4226 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
4227 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
4228 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
4229 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
4230 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
4234 static const struct pci_device_id skx_uncore_pci_ids[] = {
4235 { /* MC0 Channel 0 */
4236 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4237 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4239 { /* MC0 Channel 1 */
4240 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4241 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4243 { /* MC0 Channel 2 */
4244 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4245 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4247 { /* MC1 Channel 0 */
4248 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4249 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4251 { /* MC1 Channel 1 */
4252 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4253 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4255 { /* MC1 Channel 2 */
4256 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4257 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4260 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4261 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4264 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4265 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4268 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4269 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4272 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4273 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4276 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4277 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4280 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4281 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4284 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4285 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4288 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4289 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4292 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4293 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4295 { /* M3UPI0 Link 0 */
4296 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4297 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4299 { /* M3UPI0 Link 1 */
4300 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4301 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4303 { /* M3UPI1 Link 2 */
4304 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4305 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4307 { /* end: all zeroes */ }
4311 static struct pci_driver skx_uncore_pci_driver = {
4312 .name = "skx_uncore",
4313 .id_table = skx_uncore_pci_ids,
4316 int skx_uncore_pci_init(void)
4318 /* need to double check pci address */
4319 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4324 uncore_pci_uncores = skx_pci_uncores;
4325 uncore_pci_driver = &skx_uncore_pci_driver;
4329 /* end of SKX uncore support */
4331 /* SNR uncore support */
4333 static struct intel_uncore_type snr_uncore_ubox = {
4337 .perf_ctr_bits = 48,
4338 .fixed_ctr_bits = 48,
4339 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4340 .event_ctl = SNR_U_MSR_PMON_CTL0,
4341 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4342 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4343 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4344 .ops = &ivbep_uncore_msr_ops,
4345 .format_group = &ivbep_uncore_format_group,
4348 static struct attribute *snr_uncore_cha_formats_attr[] = {
4349 &format_attr_event.attr,
4350 &format_attr_umask_ext2.attr,
4351 &format_attr_edge.attr,
4352 &format_attr_tid_en.attr,
4353 &format_attr_inv.attr,
4354 &format_attr_thresh8.attr,
4355 &format_attr_filter_tid5.attr,
4358 static const struct attribute_group snr_uncore_chabox_format_group = {
4360 .attrs = snr_uncore_cha_formats_attr,
4363 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4365 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4367 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4368 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4369 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4375 static void snr_cha_enable_event(struct intel_uncore_box *box,
4376 struct perf_event *event)
4378 struct hw_perf_event *hwc = &event->hw;
4379 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4381 if (reg1->idx != EXTRA_REG_NONE)
4382 wrmsrl(reg1->reg, reg1->config);
4384 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4387 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4388 .init_box = ivbep_uncore_msr_init_box,
4389 .disable_box = snbep_uncore_msr_disable_box,
4390 .enable_box = snbep_uncore_msr_enable_box,
4391 .disable_event = snbep_uncore_msr_disable_event,
4392 .enable_event = snr_cha_enable_event,
4393 .read_counter = uncore_msr_read_counter,
4394 .hw_config = snr_cha_hw_config,
4397 static struct intel_uncore_type snr_uncore_chabox = {
4401 .perf_ctr_bits = 48,
4402 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4403 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4404 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4405 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4406 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4407 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4408 .ops = &snr_uncore_chabox_ops,
4409 .format_group = &snr_uncore_chabox_format_group,
4412 static struct attribute *snr_uncore_iio_formats_attr[] = {
4413 &format_attr_event.attr,
4414 &format_attr_umask.attr,
4415 &format_attr_edge.attr,
4416 &format_attr_inv.attr,
4417 &format_attr_thresh9.attr,
4418 &format_attr_ch_mask2.attr,
4419 &format_attr_fc_mask2.attr,
4423 static const struct attribute_group snr_uncore_iio_format_group = {
4425 .attrs = snr_uncore_iio_formats_attr,
4429 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4431 /* Root bus 0x00 is valid only for pmu_idx = 1. */
4432 return pmu_iio_mapping_visible(kobj, attr, die, 1);
4435 static struct attribute_group snr_iio_mapping_group = {
4436 .is_visible = snr_iio_mapping_visible,
4439 static const struct attribute_group *snr_iio_attr_update[] = {
4440 &snr_iio_mapping_group,
4444 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4447 int die, stack_id, ret = -EPERM;
4448 struct pci_dev *dev = NULL;
4450 type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4452 if (!type->topology)
4455 while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4456 ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4458 ret = pcibios_err_to_errno(ret);
4462 die = uncore_pcibus_to_dieid(dev->bus);
4463 stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4464 if (die < 0 || stack_id >= type->num_boxes) {
4469 /* Convert stack id from SAD_CONTROL to PMON notation. */
4470 stack_id = sad_pmon_mapping[stack_id];
4472 ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4473 type->topology[die].segment = pci_domain_nr(dev->bus);
4477 kfree(type->topology);
4478 type->topology = NULL;
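/*
 * Summary of sad_cfg_iio_topology(): walk every Mesh2IIO mapping device, read
 * its SAD_CONTROL_CFG register to learn which stack it serves, translate that
 * stack ID into PMON numbering via the platform mapping table, and record the
 * device's root bus number and PCI segment in the per-die topology.
 */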
4485 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
4489 SNR_CBDMA_DMI_PMON_ID,
4492 SNR_PCIE_GEN3_PMON_ID
4495 static u8 snr_sad_pmon_mapping[] = {
4496 SNR_CBDMA_DMI_PMON_ID,
4497 SNR_PCIE_GEN3_PMON_ID,
4503 static int snr_iio_get_topology(struct intel_uncore_type *type)
4505 return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4508 static int snr_iio_set_mapping(struct intel_uncore_type *type)
4510 return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
4513 static struct intel_uncore_type snr_uncore_iio = {
4517 .perf_ctr_bits = 48,
4518 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4519 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4520 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4521 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4522 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4523 .msr_offset = SNR_IIO_MSR_OFFSET,
4524 .ops = &ivbep_uncore_msr_ops,
4525 .format_group = &snr_uncore_iio_format_group,
4526 .attr_update = snr_iio_attr_update,
4527 .get_topology = snr_iio_get_topology,
4528 .set_mapping = snr_iio_set_mapping,
4529 .cleanup_mapping = skx_iio_cleanup_mapping,
4532 static struct intel_uncore_type snr_uncore_irp = {
4536 .perf_ctr_bits = 48,
4537 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4538 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4539 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4540 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4541 .msr_offset = SNR_IRP_MSR_OFFSET,
4542 .ops = &ivbep_uncore_msr_ops,
4543 .format_group = &ivbep_uncore_format_group,
4546 static struct intel_uncore_type snr_uncore_m2pcie = {
4550 .perf_ctr_bits = 48,
4551 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4552 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4553 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4554 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4555 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4556 .ops = &ivbep_uncore_msr_ops,
4557 .format_group = &ivbep_uncore_format_group,
4560 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4562 struct hw_perf_event *hwc = &event->hw;
4563 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4564 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4566 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4567 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4568 reg1->idx = ev_sel - 0xb;
4569 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4574 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4575 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4576 .hw_config = snr_pcu_hw_config,
4577 .get_constraint = snbep_pcu_get_constraint,
4578 .put_constraint = snbep_pcu_put_constraint,
4581 static struct intel_uncore_type snr_uncore_pcu = {
4585 .perf_ctr_bits = 48,
4586 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4587 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4588 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4589 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4590 .num_shared_regs = 1,
4591 .ops = &snr_uncore_pcu_ops,
4592 .format_group = &skx_uncore_pcu_format_group,
4595 enum perf_uncore_snr_iio_freerunning_type_id {
4599 SNR_IIO_FREERUNNING_TYPE_MAX,
4602 static struct freerunning_counters snr_iio_freerunning[] = {
4603 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4604 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4607 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4608 /* Free-Running IIO CLOCKS Counter */
4609 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4610 /* Free-Running IIO BANDWIDTH IN Counters */
4611 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4612 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4613 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4614 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4615 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4616 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4617 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4618 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4619 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4620 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4621 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4622 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4623 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4624 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4625 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4626 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4627 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4628 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4629 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4630 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4631 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4632 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4633 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4634 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4635 { /* end: all zeroes */ },
4638 static struct intel_uncore_type snr_uncore_iio_free_running = {
4639 .name = "iio_free_running",
4642 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4643 .freerunning = snr_iio_freerunning,
4644 .ops = &skx_uncore_iio_freerunning_ops,
4645 .event_descs = snr_uncore_iio_freerunning_events,
4646 .format_group = &skx_uncore_iio_freerunning_format_group,
4649 static struct intel_uncore_type *snr_msr_uncores[] = {
4656 &snr_uncore_iio_free_running,
4660 void snr_uncore_cpu_init(void)
4662 uncore_msr_uncores = snr_msr_uncores;
4665 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4667 struct pci_dev *pdev = box->pci_dev;
4668 int box_ctl = uncore_pci_box_ctl(box);
4670 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4671 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4674 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4675 .init_box = snr_m2m_uncore_pci_init_box,
4676 .disable_box = snbep_uncore_pci_disable_box,
4677 .enable_box = snbep_uncore_pci_enable_box,
4678 .disable_event = snbep_uncore_pci_disable_event,
4679 .enable_event = snbep_uncore_pci_enable_event,
4680 .read_counter = snbep_uncore_pci_read_counter,
4683 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4684 &format_attr_event.attr,
4685 &format_attr_umask_ext3.attr,
4686 &format_attr_edge.attr,
4687 &format_attr_inv.attr,
4688 &format_attr_thresh8.attr,
4692 static const struct attribute_group snr_m2m_uncore_format_group = {
4694 .attrs = snr_m2m_uncore_formats_attr,
4697 static struct intel_uncore_type snr_uncore_m2m = {
4701 .perf_ctr_bits = 48,
4702 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4703 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4704 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4705 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4706 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4707 .ops = &snr_m2m_uncore_pci_ops,
4708 .format_group = &snr_m2m_uncore_format_group,
4711 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4713 struct pci_dev *pdev = box->pci_dev;
4714 struct hw_perf_event *hwc = &event->hw;
4716 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4717 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4720 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4721 .init_box = snr_m2m_uncore_pci_init_box,
4722 .disable_box = snbep_uncore_pci_disable_box,
4723 .enable_box = snbep_uncore_pci_enable_box,
4724 .disable_event = snbep_uncore_pci_disable_event,
4725 .enable_event = snr_uncore_pci_enable_event,
4726 .read_counter = snbep_uncore_pci_read_counter,
4729 static struct intel_uncore_type snr_uncore_pcie3 = {
4733 .perf_ctr_bits = 48,
4734 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4735 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4736 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4737 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4738 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4739 .ops = &snr_pcie3_uncore_pci_ops,
4740 .format_group = &skx_uncore_iio_format_group,
4745 SNR_PCI_UNCORE_PCIE3,
4748 static struct intel_uncore_type *snr_pci_uncores[] = {
4749 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4750 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
4754 static const struct pci_device_id snr_uncore_pci_ids[] = {
4756 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4757 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4759 { /* end: all zeroes */ }
4762 static struct pci_driver snr_uncore_pci_driver = {
4763 .name = "snr_uncore",
4764 .id_table = snr_uncore_pci_ids,
4767 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4769 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4770 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4772 { /* end: all zeroes */ }
4775 static struct pci_driver snr_uncore_pci_sub_driver = {
4776 .name = "snr_uncore_sub",
4777 .id_table = snr_uncore_pci_sub_ids,
4780 int snr_uncore_pci_init(void)
4783 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4784 SKX_GIDNIDMAP, true);
4789 uncore_pci_uncores = snr_pci_uncores;
4790 uncore_pci_driver = &snr_uncore_pci_driver;
4791 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4795 #define SNR_MC_DEVICE_ID 0x3451
4797 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
4799 struct pci_dev *mc_dev = NULL;
4803 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
4806 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4813 static int snr_uncore_mmio_map(struct intel_uncore_box *box,
4814 unsigned int box_ctl, int mem_offset,
4815 unsigned int device)
4817 struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
4818 struct intel_uncore_type *type = box->pmu->type;
4819 resource_size_t addr;
4825 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4826 addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4828 pci_read_config_dword(pdev, mem_offset, &pci_dword);
4829 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
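	/*
	 * The PMON MMIO base is assembled from two config dwords: the bits
	 * selected by SNR_IMC_MMIO_BASE_MASK shifted left by 23, OR'ed with
	 * the MEM0 bits shifted left by 12, before being ioremap()'ed below.
	 */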
4833 box->io_addr = ioremap(addr, type->mmio_map_size);
4834 if (!box->io_addr) {
4835 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4842 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4843 unsigned int box_ctl, int mem_offset,
4844 unsigned int device)
4846 if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
4847 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4850 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4852 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4853 SNR_IMC_MMIO_MEM0_OFFSET,
4857 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4864 config = readl(box->io_addr);
4865 config |= SNBEP_PMON_BOX_CTL_FRZ;
4866 writel(config, box->io_addr);
4869 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4876 config = readl(box->io_addr);
4877 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4878 writel(config, box->io_addr);
4881 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4882 struct perf_event *event)
4884 struct hw_perf_event *hwc = &event->hw;
4889 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4892 writel(hwc->config | SNBEP_PMON_CTL_EN,
4893 box->io_addr + hwc->config_base);
4896 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4897 struct perf_event *event)
4899 struct hw_perf_event *hwc = &event->hw;
4904 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4907 writel(hwc->config, box->io_addr + hwc->config_base);
4910 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4911 .init_box = snr_uncore_mmio_init_box,
4912 .exit_box = uncore_mmio_exit_box,
4913 .disable_box = snr_uncore_mmio_disable_box,
4914 .enable_box = snr_uncore_mmio_enable_box,
4915 .disable_event = snr_uncore_mmio_disable_event,
4916 .enable_event = snr_uncore_mmio_enable_event,
4917 .read_counter = uncore_mmio_read_counter,
4920 static struct uncore_event_desc snr_uncore_imc_events[] = {
4921 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
4922 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4923 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4924 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4925 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4926 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4927 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4928 { /* end: all zeroes */ },
4931 static struct intel_uncore_type snr_uncore_imc = {
4935 .perf_ctr_bits = 48,
4936 .fixed_ctr_bits = 48,
4937 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4938 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4939 .event_descs = snr_uncore_imc_events,
4940 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4941 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4942 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4943 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4944 .mmio_offset = SNR_IMC_MMIO_OFFSET,
4945 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4946 .ops = &snr_uncore_mmio_ops,
4947 .format_group = &skx_uncore_format_group,
4950 enum perf_uncore_snr_imc_freerunning_type_id {
4954 SNR_IMC_FREERUNNING_TYPE_MAX,
4957 static struct freerunning_counters snr_imc_freerunning[] = {
4958 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
4959 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
4962 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4963 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4965 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4966 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
4967 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4968 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4969 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
4970 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4971 { /* end: all zeroes */ },
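/*
 * The read/write counters count 64-byte cache lines; the scale converts that
 * to MiB: 64 / 2^20 = 6.103515625e-5.
 */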
4974 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4975 .init_box = snr_uncore_mmio_init_box,
4976 .exit_box = uncore_mmio_exit_box,
4977 .read_counter = uncore_mmio_read_counter,
4978 .hw_config = uncore_freerunning_hw_config,
4981 static struct intel_uncore_type snr_uncore_imc_free_running = {
4982 .name = "imc_free_running",
4985 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
4986 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4987 .freerunning = snr_imc_freerunning,
4988 .ops = &snr_uncore_imc_freerunning_ops,
4989 .event_descs = snr_uncore_imc_freerunning_events,
4990 .format_group = &skx_uncore_iio_freerunning_format_group,
4993 static struct intel_uncore_type *snr_mmio_uncores[] = {
4995 &snr_uncore_imc_free_running,
4999 void snr_uncore_mmio_init(void)
5001 uncore_mmio_uncores = snr_mmio_uncores;
5004 /* end of SNR uncore support */
5006 /* ICX uncore support */
5008 static unsigned icx_cha_msr_offsets[] = {
5009 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
5010 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
5011 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
5012 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
5013 0x1c, 0x2a, 0x38, 0x46,
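/*
 * The CHA MSR ranges on ICX are not evenly strided, so the per-box offsets
 * are kept in this table and indexed by pmu_idx (see icx_cha_hw_config()
 * below) instead of using a single msr_offset stride.
 */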
5016 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5018 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5019 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5022 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5023 icx_cha_msr_offsets[box->pmu->pmu_idx];
5024 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5031 static struct intel_uncore_ops icx_uncore_chabox_ops = {
5032 .init_box = ivbep_uncore_msr_init_box,
5033 .disable_box = snbep_uncore_msr_disable_box,
5034 .enable_box = snbep_uncore_msr_enable_box,
5035 .disable_event = snbep_uncore_msr_disable_event,
5036 .enable_event = snr_cha_enable_event,
5037 .read_counter = uncore_msr_read_counter,
5038 .hw_config = icx_cha_hw_config,
5041 static struct intel_uncore_type icx_uncore_chabox = {
5044 .perf_ctr_bits = 48,
5045 .event_ctl = ICX_C34_MSR_PMON_CTL0,
5046 .perf_ctr = ICX_C34_MSR_PMON_CTR0,
5047 .box_ctl = ICX_C34_MSR_PMON_BOX_CTL,
5048 .msr_offsets = icx_cha_msr_offsets,
5049 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
5050 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
5051 .constraints = skx_uncore_chabox_constraints,
5052 .ops = &icx_uncore_chabox_ops,
5053 .format_group = &snr_uncore_chabox_format_group,
5056 static unsigned icx_msr_offsets[] = {
5057 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5060 static struct event_constraint icx_uncore_iio_constraints[] = {
5061 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
5062 UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
5063 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
5064 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
5065 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
5066 EVENT_CONSTRAINT_END
5070 icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
5072 /* Root bus 0x00 is valid only for pmu_idx = 5. */
5073 return pmu_iio_mapping_visible(kobj, attr, die, 5);
5076 static struct attribute_group icx_iio_mapping_group = {
5077 .is_visible = icx_iio_mapping_visible,
5080 static const struct attribute_group *icx_iio_attr_update[] = {
5081 &icx_iio_mapping_group,
5086 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
5094 ICX_CBDMA_DMI_PMON_ID
5097 static u8 icx_sad_pmon_mapping[] = {
5098 ICX_CBDMA_DMI_PMON_ID,
5106 static int icx_iio_get_topology(struct intel_uncore_type *type)
5108 return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5111 static int icx_iio_set_mapping(struct intel_uncore_type *type)
5113 return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5116 static struct intel_uncore_type icx_uncore_iio = {
5120 .perf_ctr_bits = 48,
5121 .event_ctl = ICX_IIO_MSR_PMON_CTL0,
5122 .perf_ctr = ICX_IIO_MSR_PMON_CTR0,
5123 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5124 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5125 .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL,
5126 .msr_offsets = icx_msr_offsets,
5127 .constraints = icx_uncore_iio_constraints,
5128 .ops = &skx_uncore_iio_ops,
5129 .format_group = &snr_uncore_iio_format_group,
5130 .attr_update = icx_iio_attr_update,
5131 .get_topology = icx_iio_get_topology,
5132 .set_mapping = icx_iio_set_mapping,
5133 .cleanup_mapping = skx_iio_cleanup_mapping,
5136 static struct intel_uncore_type icx_uncore_irp = {
5140 .perf_ctr_bits = 48,
5141 .event_ctl = ICX_IRP0_MSR_PMON_CTL0,
5142 .perf_ctr = ICX_IRP0_MSR_PMON_CTR0,
5143 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5144 .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL,
5145 .msr_offsets = icx_msr_offsets,
5146 .ops = &ivbep_uncore_msr_ops,
5147 .format_group = &ivbep_uncore_format_group,
5150 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5151 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5152 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5153 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5154 EVENT_CONSTRAINT_END
5157 static struct intel_uncore_type icx_uncore_m2pcie = {
5161 .perf_ctr_bits = 48,
5162 .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0,
5163 .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0,
5164 .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL,
5165 .msr_offsets = icx_msr_offsets,
5166 .constraints = icx_uncore_m2pcie_constraints,
5167 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5168 .ops = &ivbep_uncore_msr_ops,
5169 .format_group = &ivbep_uncore_format_group,
5172 enum perf_uncore_icx_iio_freerunning_type_id {
5176 ICX_IIO_FREERUNNING_TYPE_MAX,
5179 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5180 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5183 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5184 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5187 static struct freerunning_counters icx_iio_freerunning[] = {
5188 [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5189 [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5192 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5193 /* Free-Running IIO CLOCKS Counter */
5194 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
5195 /* Free-Running IIO BANDWIDTH IN Counters */
5196 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
5197 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
5198 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
5199 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
5200 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
5201 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
5202 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
5203 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
5204 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
5205 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
5206 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
5207 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
5208 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
5209 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
5210 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
5211 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
5212 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
5213 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
5214 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
5215 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
5216 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
5217 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
5218 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
5219 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
5220 { /* end: all zeroes */ },
5223 static struct intel_uncore_type icx_uncore_iio_free_running = {
5224 .name = "iio_free_running",
5227 .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
5228 .freerunning = icx_iio_freerunning,
5229 .ops = &skx_uncore_iio_freerunning_ops,
5230 .event_descs = icx_uncore_iio_freerunning_events,
5231 .format_group = &skx_uncore_iio_freerunning_format_group,
5234 static struct intel_uncore_type *icx_msr_uncores[] = {
5241 &icx_uncore_iio_free_running,
5246 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
5247 * registers, which are located at Device 30, Function 3.
5249 #define ICX_CAPID6 0x9c
5250 #define ICX_CAPID7 0xa0
5252 static u64 icx_count_chabox(void)
5254 struct pci_dev *dev = NULL;
5257 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5261 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5262 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5265 return hweight64(caps);
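/*
 * CAPID6 supplies the low 32 bits and CAPID7 the high 32 bits of the CHA
 * enable bitmap; hweight64() of the combined value is the number of CHA
 * boxes present on the die.
 */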
5268 void icx_uncore_cpu_init(void)
5270 u64 num_boxes = icx_count_chabox();
5272 if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5274 icx_uncore_chabox.num_boxes = num_boxes;
5275 uncore_msr_uncores = icx_msr_uncores;
5278 static struct intel_uncore_type icx_uncore_m2m = {
5282 .perf_ctr_bits = 48,
5283 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
5284 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
5285 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5286 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
5287 .ops = &snr_m2m_uncore_pci_ops,
5288 .format_group = &skx_uncore_format_group,
5291 static struct attribute *icx_upi_uncore_formats_attr[] = {
5292 &format_attr_event.attr,
5293 &format_attr_umask_ext4.attr,
5294 &format_attr_edge.attr,
5295 &format_attr_inv.attr,
5296 &format_attr_thresh8.attr,
5300 static const struct attribute_group icx_upi_uncore_format_group = {
5302 .attrs = icx_upi_uncore_formats_attr,
5305 static struct intel_uncore_type icx_uncore_upi = {
5309 .perf_ctr_bits = 48,
5310 .perf_ctr = ICX_UPI_PCI_PMON_CTR0,
5311 .event_ctl = ICX_UPI_PCI_PMON_CTL0,
5312 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5313 .event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5314 .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
5315 .ops = &skx_upi_uncore_pci_ops,
5316 .format_group = &icx_upi_uncore_format_group,
5319 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5320 UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5321 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5322 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5323 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5324 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5325 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5326 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5327 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5328 EVENT_CONSTRAINT_END
5331 static struct intel_uncore_type icx_uncore_m3upi = {
5335 .perf_ctr_bits = 48,
5336 .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
5337 .event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
5338 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5339 .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
5340 .constraints = icx_uncore_m3upi_constraints,
5341 .ops = &ivbep_uncore_pci_ops,
5342 .format_group = &skx_uncore_format_group,
5348 ICX_PCI_UNCORE_M3UPI,
5351 static struct intel_uncore_type *icx_pci_uncores[] = {
5352 [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m,
5353 [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi,
5354 [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi,
5358 static const struct pci_device_id icx_uncore_pci_ids[] = {
5360 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5361 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5364 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5365 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5368 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5369 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5372 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5373 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5376 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5377 .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5380 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5381 .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5384 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5385 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5387 { /* M3UPI Link 0 */
5388 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5389 .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5391 { /* M3UPI Link 1 */
5392 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5393 .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5395 { /* M3UPI Link 2 */
5396 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5397 .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5399 { /* end: all zeroes */ }
5402 static struct pci_driver icx_uncore_pci_driver = {
5403 .name = "icx_uncore",
5404 .id_table = icx_uncore_pci_ids,
5407 int icx_uncore_pci_init(void)
5410 int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5411 SKX_GIDNIDMAP, true);
5416 uncore_pci_uncores = icx_pci_uncores;
5417 uncore_pci_driver = &icx_uncore_pci_driver;
5421 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5423 unsigned int box_ctl = box->pmu->type->box_ctl +
5424 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5425 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5426 SNR_IMC_MMIO_MEM0_OFFSET;
5428 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
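	/*
	 * pmu_idx is decomposed above: the remainder modulo ICX_NUMBER_IMC_CHN
	 * picks the channel (register block) within one memory controller,
	 * while the quotient selects the controller and hence which MEMn BAR
	 * offset (ICX_IMC_MEM_STRIDE apart) to map.
	 */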
5432 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5433 .init_box = icx_uncore_imc_init_box,
5434 .exit_box = uncore_mmio_exit_box,
5435 .disable_box = snr_uncore_mmio_disable_box,
5436 .enable_box = snr_uncore_mmio_enable_box,
5437 .disable_event = snr_uncore_mmio_disable_event,
5438 .enable_event = snr_uncore_mmio_enable_event,
5439 .read_counter = uncore_mmio_read_counter,
5442 static struct intel_uncore_type icx_uncore_imc = {
5446 .perf_ctr_bits = 48,
5447 .fixed_ctr_bits = 48,
5448 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5449 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5450 .event_descs = hswep_uncore_imc_events,
5451 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
5452 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
5453 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5454 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
5455 .mmio_offset = SNR_IMC_MMIO_OFFSET,
5456 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5457 .ops = &icx_uncore_mmio_ops,
5458 .format_group = &skx_uncore_format_group,
5461 enum perf_uncore_icx_imc_freerunning_type_id {
5466 ICX_IMC_FREERUNNING_TYPE_MAX,
5469 static struct freerunning_counters icx_imc_freerunning[] = {
5470 [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5471 [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5472 [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
5475 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5476 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5478 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5479 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5480 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5481 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5482 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5483 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5485 INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
5486 INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
5487 INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
5488 INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
5489 INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
5490 INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5491 { /* end: all zeroes */ },
5494 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5496 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5497 SNR_IMC_MMIO_MEM0_OFFSET;
5499 snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5500 mem_offset, SNR_MC_DEVICE_ID);
5503 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5504 .init_box = icx_uncore_imc_freerunning_init_box,
5505 .exit_box = uncore_mmio_exit_box,
5506 .read_counter = uncore_mmio_read_counter,
5507 .hw_config = uncore_freerunning_hw_config,
5510 static struct intel_uncore_type icx_uncore_imc_free_running = {
5511 .name = "imc_free_running",
5514 .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
5515 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5516 .freerunning = icx_imc_freerunning,
5517 .ops = &icx_uncore_imc_freerunning_ops,
5518 .event_descs = icx_uncore_imc_freerunning_events,
5519 .format_group = &skx_uncore_iio_freerunning_format_group,
5522 static struct intel_uncore_type *icx_mmio_uncores[] = {
5524 &icx_uncore_imc_free_running,
5528 void icx_uncore_mmio_init(void)
5530 uncore_mmio_uncores = icx_mmio_uncores;
5533 /* end of ICX uncore support */
5535 /* SPR uncore support */
5537 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5538 struct perf_event *event)
5540 struct hw_perf_event *hwc = &event->hw;
5541 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5543 if (reg1->idx != EXTRA_REG_NONE)
5544 wrmsrl(reg1->reg, reg1->config);
5546 wrmsrl(hwc->config_base, hwc->config);
5549 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5550 struct perf_event *event)
5552 struct hw_perf_event *hwc = &event->hw;
5553 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5555 if (reg1->idx != EXTRA_REG_NONE)
5556 wrmsrl(reg1->reg, 0);
5558 wrmsrl(hwc->config_base, 0);
5561 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5563 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5564 bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5565 struct intel_uncore_type *type = box->pmu->type;
5568 reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5569 HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
5570 reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
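	/*
	 * Reading of the code (not a documented guarantee): SPR goes through
	 * type->box_ids[] here because the discovered CHA boxes need not be
	 * contiguous, so the table maps the logical pmu_idx back to the
	 * physical box whose FILTER0 MSR should be programmed.
	 */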
5577 static struct intel_uncore_ops spr_uncore_chabox_ops = {
5578 .init_box = intel_generic_uncore_msr_init_box,
5579 .disable_box = intel_generic_uncore_msr_disable_box,
5580 .enable_box = intel_generic_uncore_msr_enable_box,
5581 .disable_event = spr_uncore_msr_disable_event,
5582 .enable_event = spr_uncore_msr_enable_event,
5583 .read_counter = uncore_msr_read_counter,
5584 .hw_config = spr_cha_hw_config,
5585 .get_constraint = uncore_get_constraint,
5586 .put_constraint = uncore_put_constraint,
5589 static struct attribute *spr_uncore_cha_formats_attr[] = {
5590 &format_attr_event.attr,
5591 &format_attr_umask_ext4.attr,
5592 &format_attr_tid_en2.attr,
5593 &format_attr_edge.attr,
5594 &format_attr_inv.attr,
5595 &format_attr_thresh8.attr,
5596 &format_attr_filter_tid5.attr,
5599 static const struct attribute_group spr_uncore_chabox_format_group = {
5601 .attrs = spr_uncore_cha_formats_attr,
5604 static ssize_t alias_show(struct device *dev,
5605 struct device_attribute *attr,
5608 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
5609 char pmu_name[UNCORE_PMU_NAME_LEN];
5611 uncore_get_alias_name(pmu_name, pmu);
5612 return sysfs_emit(buf, "%s\n", pmu_name);
5615 static DEVICE_ATTR_RO(alias);
5617 static struct attribute *uncore_alias_attrs[] = {
5618 &dev_attr_alias.attr,
5622 ATTRIBUTE_GROUPS(uncore_alias);
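/*
 * The "alias" attribute exposes the generic, discovery-style name of the PMU
 * (presumably of the form uncore_type_<type id>_<box id>) so tools can
 * correlate the human-readable SPR PMU names with their generic aliases.
 */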
5624 static struct intel_uncore_type spr_uncore_chabox = {
5626 .event_mask = SPR_CHA_PMON_EVENT_MASK,
5627 .event_mask_ext = SPR_RAW_EVENT_MASK_EXT,
5628 .num_shared_regs = 1,
5629 .ops = &spr_uncore_chabox_ops,
5630 .format_group = &spr_uncore_chabox_format_group,
5631 .attr_update = uncore_alias_groups,
5634 static struct intel_uncore_type spr_uncore_iio = {
5636 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5637 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5638 .format_group = &snr_uncore_iio_format_group,
5639 .attr_update = uncore_alias_groups,
5642 static struct attribute *spr_uncore_raw_formats_attr[] = {
5643 &format_attr_event.attr,
5644 &format_attr_umask_ext4.attr,
5645 &format_attr_edge.attr,
5646 &format_attr_inv.attr,
5647 &format_attr_thresh8.attr,
5651 static const struct attribute_group spr_uncore_raw_format_group = {
5653 .attrs = spr_uncore_raw_formats_attr,
5656 #define SPR_UNCORE_COMMON_FORMAT() \
5657 .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
5658 .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, \
5659 .format_group = &spr_uncore_raw_format_group, \
5660 .attr_update = uncore_alias_groups
5662 static struct intel_uncore_type spr_uncore_irp = {
5663 SPR_UNCORE_COMMON_FORMAT(),
5668 static struct intel_uncore_type spr_uncore_m2pcie = {
5669 SPR_UNCORE_COMMON_FORMAT(),
5673 static struct intel_uncore_type spr_uncore_pcu = {
5675 .attr_update = uncore_alias_groups,
5678 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
5679 struct perf_event *event)
5681 struct hw_perf_event *hwc = &event->hw;
5686 if (uncore_pmc_fixed(hwc->idx))
5687 writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
5689 writel(hwc->config, box->io_addr + hwc->config_base);
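	/*
	 * For the fixed counter only the enable bit is written to its control
	 * register; general-purpose counters get their full event configuration.
	 */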
5692 static struct intel_uncore_ops spr_uncore_mmio_ops = {
5693 .init_box = intel_generic_uncore_mmio_init_box,
5694 .exit_box = uncore_mmio_exit_box,
5695 .disable_box = intel_generic_uncore_mmio_disable_box,
5696 .enable_box = intel_generic_uncore_mmio_enable_box,
5697 .disable_event = intel_generic_uncore_mmio_disable_event,
5698 .enable_event = spr_uncore_mmio_enable_event,
5699 .read_counter = uncore_mmio_read_counter,
5702 static struct intel_uncore_type spr_uncore_imc = {
5703 SPR_UNCORE_COMMON_FORMAT(),
5705 .fixed_ctr_bits = 48,
5706 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5707 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5708 .ops = &spr_uncore_mmio_ops,
5711 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
5712 struct perf_event *event)
5714 struct pci_dev *pdev = box->pci_dev;
5715 struct hw_perf_event *hwc = &event->hw;
5717 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
5718 pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
5721 static struct intel_uncore_ops spr_uncore_pci_ops = {
5722 .init_box = intel_generic_uncore_pci_init_box,
5723 .disable_box = intel_generic_uncore_pci_disable_box,
5724 .enable_box = intel_generic_uncore_pci_enable_box,
5725 .disable_event = intel_generic_uncore_pci_disable_event,
5726 .enable_event = spr_uncore_pci_enable_event,
5727 .read_counter = intel_generic_uncore_pci_read_counter,
5730 #define SPR_UNCORE_PCI_COMMON_FORMAT() \
5731 SPR_UNCORE_COMMON_FORMAT(), \
5732 .ops = &spr_uncore_pci_ops
static struct intel_uncore_type spr_uncore_m2m = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m2m",
};

static struct intel_uncore_type spr_uncore_upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "upi",
};

static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
};

static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};

#define UNCORE_SPR_NUM_UNCORE_TYPES		12
#define UNCORE_SPR_IIO				1

static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	&spr_uncore_m2pcie,
	&spr_uncore_pcu,
	NULL,
	&spr_uncore_imc,
	&spr_uncore_m2m,
	&spr_uncore_upi,
	&spr_uncore_m3upi,
	NULL,
	&spr_uncore_mdf,
};

enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

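/* { first counter MSR, counter stride, box stride, #counters, counter width } */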
static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= spr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES		1

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

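/*
 * Override only the fields that the SPR-specific template actually sets;
 * everything else keeps the value filled in from the discovery table.
 */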
static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
					struct intel_uncore_type *from_type)
{
	if (!to_type || !from_type)
		return;

	if (from_type->name)
		to_type->name = from_type->name;
	if (from_type->fixed_ctr_bits)
		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
	if (from_type->event_mask)
		to_type->event_mask = from_type->event_mask;
	if (from_type->event_mask_ext)
		to_type->event_mask_ext = from_type->event_mask_ext;
	if (from_type->fixed_ctr)
		to_type->fixed_ctr = from_type->fixed_ctr;
	if (from_type->fixed_ctl)
		to_type->fixed_ctl = from_type->fixed_ctl;
	if (from_type->num_shared_regs)
		to_type->num_shared_regs = from_type->num_shared_regs;
	if (from_type->constraints)
		to_type->constraints = from_type->constraints;
	if (from_type->ops)
		to_type->ops = from_type->ops;
	if (from_type->event_descs)
		to_type->event_descs = from_type->event_descs;
	if (from_type->format_group)
		to_type->format_group = from_type->format_group;
	if (from_type->attr_update)
		to_type->attr_update = from_type->attr_update;
}

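/*
 * Build the type list from the discovery table, apply the SPR-specific
 * customizations above, then append the extra types (such as the
 * free-running IIO counters) that the discovery table does not describe.
 */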
static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
			continue;
		uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
	for (; *types; types++) {
		if (type_id == (*types)->type_id)
			return *types;
	}

	return NULL;
}

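/*
 * Box IDs from the discovery table are not guaranteed to be contiguous,
 * so size per-type resources by the highest ID plus one.
 */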
static int uncore_type_max_boxes(struct intel_uncore_type **types,
				 int type_id)
{
	struct intel_uncore_type *type;
	int i, max = 0;

	type = uncore_find_type_by_id(types, type_id);
	if (!type)
		return 0;

	for (i = 0; i < type->num_boxes; i++) {
		if (type->box_ids[i] > max)
			max = type->box_ids[i];
	}

	return max + 1;
}

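/*
 * The free-running IIO counters are replicated per IIO box, so mirror the
 * number of IIO boxes reported by the discovery table.
 */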
void spr_uncore_cpu_init(void)
{
	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores);

	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

int spr_uncore_pci_init(void)
{
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
	return 0;
}

void spr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
}

/* end of SPR uncore support */