// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)
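
/*
 * Example (illustrative): a raw event of event=0x37,umask=0x3 lands in a
 * control register as (0x3 << 8) | 0x37 = 0x337; the edge, invert and
 * threshold fields occupy the higher bits covered by
 * SNBEP_PMON_RAW_EVENT_MASK above.
 */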

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)

#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)

#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
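
/*
 * Example (illustrative): with a shared-register reference counter packed
 * as 6-bit fields, __BITS_VALUE(atomic_read(&er->ref), 2, 6) extracts
 * bits [17:12], i.e. the use count of field 2.
 */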

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP Cbo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
					 SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
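
/*
 * Example (illustrative): each BUS_NUM_n field is BUS_NUM_STRIDE (8) bits
 * wide, so bus number n can be extracted as
 *	(msr_val >> (n * BUS_NUM_STRIDE)) & 0xff
 * once SKX_MSR_CPU_BUS_VALID_BIT confirms the fields are initialized.
 */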

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0		0x1f98
#define SNR_U_MSR_PMON_CTL0		0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL	0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR	0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT	0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0		0x1c01
#define SNR_CHA_MSR_PMON_CTR0		0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL	0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0	0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0		0x1e08
#define SNR_IIO_MSR_PMON_CTR0		0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL	0x1e00
#define SNR_IIO_MSR_OFFSET		0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT	0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0		0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0		0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL	0x1ea0
#define SNR_IRP_MSR_OFFSET		0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0	0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0	0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL	0x1e50
#define SNR_M2PCIE_MSR_OFFSET		0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0		0x1ef1
#define SNR_PCU_MSR_PMON_CTR0		0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL	0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER	0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0		0x468
#define SNR_M2M_PCI_PMON_CTR0		0x440
#define SNR_M2M_PCI_PMON_BOX_CTL	0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT	0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0		0x508
#define SNR_PCIE3_PCI_PMON_CTR0		0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL	0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL	0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR	0x38
#define SNR_IMC_MMIO_PMON_CTL0		0x40
#define SNR_IMC_MMIO_PMON_CTR0		0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL	0x22800
#define SNR_IMC_MMIO_OFFSET		0x4000
#define SNR_IMC_MMIO_SIZE		0x4000
#define SNR_IMC_MMIO_BASE_OFFSET	0xd0
#define SNR_IMC_MMIO_BASE_MASK		0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET	0xd8
#define SNR_IMC_MMIO_MEM0_MASK		0x7FF
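
/*
 * The SNR iMC PMON registers sit behind an MMIO window rather than PCI
 * config space. As a rough sketch (the exact shift amounts are an
 * assumption here, not taken from this file), the window base is
 * assembled from two PCI config registers:
 *	base  = (read32(SNR_IMC_MMIO_BASE_OFFSET) & SNR_IMC_MMIO_BASE_MASK) << 23;
 *	base |= (read32(SNR_IMC_MMIO_MEM0_OFFSET) & SNR_IMC_MMIO_MEM0_MASK) << 12;
 * and each unit's registers start SNR_IMC_MMIO_OFFSET apart within it.
 */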

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0		0xb68
#define ICX_C34_MSR_PMON_CTL0		0xb61
#define ICX_C34_MSR_PMON_BOX_CTL	0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0	0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0		0xa58
#define ICX_IIO_MSR_PMON_CTR0		0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL	0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0		0xa4d
#define ICX_IRP0_MSR_PMON_CTR0		0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL	0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0	0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0	0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL	0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0		0x350
#define ICX_UPI_PCI_PMON_CTR0		0x320
#define ICX_UPI_PCI_PMON_BOX_CTL	0x318
#define ICX_UPI_CTL_UMASK_EXT		0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0		0xd8
#define ICX_M3UPI_PCI_PMON_CTR0		0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL	0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN		2
#define ICX_IMC_MEM_STRIDE		0x4

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
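
/*
 * These format strings define how sysfs exposes each field to userspace:
 * e.g. "config:8-15" maps the "umask" attribute onto bits 8-15 of
 * perf_event_attr::config. A hypothetical perf invocation using them
 * could look like:
 *	perf stat -e uncore_cbox_0/event=0x34,umask=0x3/
 */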

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}
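
/*
 * Setting the FRZ bit freezes every counter in the box at once, so
 * disable_box/enable_box can bracket counter manipulation without
 * touching each control register individually.
 */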

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
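
/*
 * The Cbox filter register is shared by all counters in a box.  Each
 * filter field is tracked by a 6-bit slice of the atomic reference
 * counter er->ref (extracted with __BITS_VALUE), so independent events
 * can share the register as long as the field values they program do
 * not conflict.
 */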

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}
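
/*
 * PCU events 0xb-0xe are the frequency/occupancy band events; each owns
 * one 8-bit band threshold in the shared PCU filter register, which is
 * why the extra-register index is derived from the event select above.
 */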

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}
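
/*
 * Event 0x38 is the QPI packet-matching event: config1 supplies the
 * 64-bit match value and config2 the 64-bit mask, which land in the
 * PKT_MATCH0/1 and PKT_MASK0/1 registers of the companion filter device.
 */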

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			/* get the Node ID of the local register */
			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
			if (err)
				break;
			nodeid = config & NODE_ID_MASK;
			/* get the Node ID mapping */
			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * every three bits in the Node ID mapping register maps
			 * to a particular node.
			 */
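			/*
			 * Example (illustrative): since each field is 3 bits,
			 * reading the mapping register in octal gives one
			 * digit per package.  With a value of 076543210
			 * (octal) and a local nodeid of 2, digit 2 matches,
			 * so this bus is attributed to physical package 2.
			 */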
			for (i = 0; i < 8; i++) {
				if (nodeid == ((config >> (3 * i)) & 0x7)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			int node = pcibus_to_node(ubox_dev->bus);
			int cpu;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			die_id = -1;
			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
				struct cpuinfo_x86 *c = &cpu_data(cpu);

				if (c->initialized && cpu_to_node(cpu) == node) {
					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				/* highest bus first */
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] >= 0)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				/* lowest bus first */
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] >= 0)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}

int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}

static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group

static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};

static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};

static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};

static u64 ivbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
	if (fields & 0x4)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x10) {
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
	}

	return mask;
}

static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}

static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1740 struct hw_perf_event *hwc = &event->hw;
1741 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1743 if (reg1->idx != EXTRA_REG_NONE) {
1744 u64 filter = uncore_shared_reg_config(box, 0);
1745 wrmsrl(reg1->reg, filter & 0xffffffff);
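		/*
		 * The upper half goes to the second filter MSR, which on
		 * IvyTown sits 6 MSRs above FILTER (0xd1a for Cbo 0)
		 * rather than at reg + 1.
		 */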
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};

static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1801 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1802 &ivbep_uncore_ubox,
1803 &ivbep_uncore_cbox,
1804 &ivbep_uncore_pcu,
1805 NULL,
1806 };
1808 void ivbep_uncore_cpu_init(void)
1809 {
1810 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1811 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1812 uncore_msr_uncores = ivbep_msr_uncores;
1813 }
1815 static struct intel_uncore_type ivbep_uncore_ha = {
1816 .name = "ha",
1817 .num_counters = 4,
1818 .num_boxes = 2,
1819 .perf_ctr_bits = 48,
1820 IVBEP_UNCORE_PCI_COMMON_INIT(),
1821 };
1823 static struct intel_uncore_type ivbep_uncore_imc = {
1824 .name = "imc",
1825 .num_counters = 4,
1826 .num_boxes = 8,
1827 .perf_ctr_bits = 48,
1828 .fixed_ctr_bits = 48,
1829 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1830 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1831 .event_descs = snbep_uncore_imc_events,
1832 IVBEP_UNCORE_PCI_COMMON_INIT(),
1833 };
1835 /* registers in IRP boxes are not properly aligned */
1836 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1837 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1839 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1840 {
1841 struct pci_dev *pdev = box->pci_dev;
1842 struct hw_perf_event *hwc = &event->hw;
1844 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1845 hwc->config | SNBEP_PMON_CTL_EN);
1846 }
1848 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1849 {
1850 struct pci_dev *pdev = box->pci_dev;
1851 struct hw_perf_event *hwc = &event->hw;
1853 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1854 }
1856 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1857 {
1858 struct pci_dev *pdev = box->pci_dev;
1859 struct hw_perf_event *hwc = &event->hw;
1860 u64 count = 0;
1862 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1863 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1865 return count;
1866 }
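/*
 * The two pci_read_config_dword() calls above assemble one 64-bit count
 * from consecutive 32-bit config-space registers; casting &count to
 * u32 * to fill the halves in place relies on x86 being little-endian,
 * which is a safe assumption in this architecture-specific driver.
 */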
1868 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1869 .init_box = ivbep_uncore_pci_init_box,
1870 .disable_box = snbep_uncore_pci_disable_box,
1871 .enable_box = snbep_uncore_pci_enable_box,
1872 .disable_event = ivbep_uncore_irp_disable_event,
1873 .enable_event = ivbep_uncore_irp_enable_event,
1874 .read_counter = ivbep_uncore_irp_read_counter,
1875 };
1877 static struct intel_uncore_type ivbep_uncore_irp = {
1878 .name = "irp",
1879 .num_counters = 4,
1880 .num_boxes = 1,
1881 .perf_ctr_bits = 48,
1882 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
1883 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1884 .ops = &ivbep_uncore_irp_ops,
1885 .format_group = &ivbep_uncore_format_group,
1886 };
1888 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1889 .init_box = ivbep_uncore_pci_init_box,
1890 .disable_box = snbep_uncore_pci_disable_box,
1891 .enable_box = snbep_uncore_pci_enable_box,
1892 .disable_event = snbep_uncore_pci_disable_event,
1893 .enable_event = snbep_qpi_enable_event,
1894 .read_counter = snbep_uncore_pci_read_counter,
1895 .hw_config = snbep_qpi_hw_config,
1896 .get_constraint = uncore_get_constraint,
1897 .put_constraint = uncore_put_constraint,
1898 };
1900 static struct intel_uncore_type ivbep_uncore_qpi = {
1901 .name = "qpi",
1902 .num_counters = 4,
1903 .num_boxes = 3,
1904 .perf_ctr_bits = 48,
1905 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1906 .event_ctl = SNBEP_PCI_PMON_CTL0,
1907 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1908 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1909 .num_shared_regs = 1,
1910 .ops = &ivbep_uncore_qpi_ops,
1911 .format_group = &ivbep_uncore_qpi_format_group,
1912 };
1914 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1915 .name = "r2pcie",
1916 .num_counters = 4,
1917 .num_boxes = 1,
1918 .perf_ctr_bits = 44,
1919 .constraints = snbep_uncore_r2pcie_constraints,
1920 IVBEP_UNCORE_PCI_COMMON_INIT(),
1921 };
1923 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1924 .name = "r3qpi",
1925 .num_counters = 3,
1926 .num_boxes = 3,
1927 .perf_ctr_bits = 44,
1928 .constraints = snbep_uncore_r3qpi_constraints,
1929 IVBEP_UNCORE_PCI_COMMON_INIT(),
1930 };
1932 enum {
1933 IVBEP_PCI_UNCORE_HA,
1934 IVBEP_PCI_UNCORE_IMC,
1935 IVBEP_PCI_UNCORE_IRP,
1936 IVBEP_PCI_UNCORE_QPI,
1937 IVBEP_PCI_UNCORE_R2PCIE,
1938 IVBEP_PCI_UNCORE_R3QPI,
1939 };
1941 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1942 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1943 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1944 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1945 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1946 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1947 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
1948 NULL,
1949 };
1951 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1952 { /* Home Agent 0 */
1953 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1954 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1955 },
1956 { /* Home Agent 1 */
1957 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1958 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1959 },
1960 { /* MC0 Channel 0 */
1961 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1962 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1963 },
1964 { /* MC0 Channel 1 */
1965 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1966 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1967 },
1968 { /* MC0 Channel 3 */
1969 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1970 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1971 },
1972 { /* MC0 Channel 4 */
1973 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1974 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1975 },
1976 { /* MC1 Channel 0 */
1977 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1978 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1979 },
1980 { /* MC1 Channel 1 */
1981 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1982 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1983 },
1984 { /* MC1 Channel 3 */
1985 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1986 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1987 },
1988 { /* MC1 Channel 4 */
1989 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1990 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1991 },
1992 { /* IRP */
1993 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1994 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1995 },
1996 { /* QPI0 Port 0 */
1997 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1998 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1999 },
2000 { /* QPI0 Port 1 */
2001 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2002 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2003 },
2004 { /* QPI1 Port 2 */
2005 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2006 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2007 },
2008 { /* R2PCIe */
2009 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2010 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2011 },
2012 { /* R3QPI0 Link 0 */
2013 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2014 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2015 },
2016 { /* R3QPI0 Link 1 */
2017 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2018 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2019 },
2020 { /* R3QPI1 Link 2 */
2021 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2022 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2023 },
2024 { /* QPI Port 0 filter */
2025 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2026 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2027 SNBEP_PCI_QPI_PORT0_FILTER),
2028 },
2029 { /* QPI Port 1 filter */
2030 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2031 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2032 SNBEP_PCI_QPI_PORT1_FILTER),
2033 },
2034 { /* end: all zeroes */ }
2035 };
2037 static struct pci_driver ivbep_uncore_pci_driver = {
2038 .name = "ivbep_uncore",
2039 .id_table = ivbep_uncore_pci_ids,
2040 };
2042 int ivbep_uncore_pci_init(void)
2043 {
2044 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2045 if (ret)
2046 return ret;
2047 uncore_pci_uncores = ivbep_pci_uncores;
2048 uncore_pci_driver = &ivbep_uncore_pci_driver;
2049 return 0;
2050 }
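/*
 * 0x0e1e is the per-socket IvyTown device that exposes the node-ID
 * registers named in the call; snbep_pci2phy_map_init() reads them to
 * build the PCI-bus-to-physical-socket map that all of the PCI PMON
 * boxes above depend on.
 */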
2051 /* end of IvyTown uncore support */
2053 /* KNL uncore support */
2054 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2055 &format_attr_event.attr,
2056 &format_attr_umask.attr,
2057 &format_attr_edge.attr,
2058 &format_attr_tid_en.attr,
2059 &format_attr_inv.attr,
2060 &format_attr_thresh5.attr,
2061 NULL,
2062 };
2064 static const struct attribute_group knl_uncore_ubox_format_group = {
2065 .name = "format",
2066 .attrs = knl_uncore_ubox_formats_attr,
2067 };
2069 static struct intel_uncore_type knl_uncore_ubox = {
2070 .name = "ubox",
2071 .num_counters = 2,
2072 .num_boxes = 1,
2073 .perf_ctr_bits = 48,
2074 .fixed_ctr_bits = 48,
2075 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2076 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2077 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2078 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2079 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2080 .ops = &snbep_uncore_msr_ops,
2081 .format_group = &knl_uncore_ubox_format_group,
2082 };
2084 static struct attribute *knl_uncore_cha_formats_attr[] = {
2085 &format_attr_event.attr,
2086 &format_attr_umask.attr,
2087 &format_attr_qor.attr,
2088 &format_attr_edge.attr,
2089 &format_attr_tid_en.attr,
2090 &format_attr_inv.attr,
2091 &format_attr_thresh8.attr,
2092 &format_attr_filter_tid4.attr,
2093 &format_attr_filter_link3.attr,
2094 &format_attr_filter_state4.attr,
2095 &format_attr_filter_local.attr,
2096 &format_attr_filter_all_op.attr,
2097 &format_attr_filter_nnm.attr,
2098 &format_attr_filter_opc3.attr,
2099 &format_attr_filter_nc.attr,
2100 &format_attr_filter_isoc.attr,
2101 NULL,
2102 };
2104 static const struct attribute_group knl_uncore_cha_format_group = {
2105 .name = "format",
2106 .attrs = knl_uncore_cha_formats_attr,
2107 };
2109 static struct event_constraint knl_uncore_cha_constraints[] = {
2110 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2111 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2112 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2113 EVENT_CONSTRAINT_END
2114 };
2116 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2117 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2118 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2119 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2120 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2121 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2122 EVENT_EXTRA_END
2123 };
2125 static u64 knl_cha_filter_mask(int fields)
2126 {
2127 u64 mask = 0;
2129 if (fields & 0x1)
2130 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2131 if (fields & 0x2)
2132 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2133 if (fields & 0x4)
2134 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2135 return mask;
2136 }
2138 static struct event_constraint *
2139 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2140 {
2141 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2142 }
2144 static int knl_cha_hw_config(struct intel_uncore_box *box,
2145 struct perf_event *event)
2146 {
2147 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2148 struct extra_reg *er;
2149 int idx = 0;
2151 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2152 if (er->event != (event->hw.config & er->config_mask))
2153 continue;
2154 idx |= er->idx;
2155 }
2157 if (idx) {
2158 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2159 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2160 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2162 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2163 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2164 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2165 reg1->idx = idx;
2166 }
2167 return 0;
2168 }
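/*
 * The remote-node, local-node and NNC bits are unconditionally ORed into
 * the filter above, so a CHA event whose config1 sets no explicit node
 * filter still counts traffic of every node type by default.
 */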
2170 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2171 struct perf_event *event);
2173 static struct intel_uncore_ops knl_uncore_cha_ops = {
2174 .init_box = snbep_uncore_msr_init_box,
2175 .disable_box = snbep_uncore_msr_disable_box,
2176 .enable_box = snbep_uncore_msr_enable_box,
2177 .disable_event = snbep_uncore_msr_disable_event,
2178 .enable_event = hswep_cbox_enable_event,
2179 .read_counter = uncore_msr_read_counter,
2180 .hw_config = knl_cha_hw_config,
2181 .get_constraint = knl_cha_get_constraint,
2182 .put_constraint = snbep_cbox_put_constraint,
2183 };
2185 static struct intel_uncore_type knl_uncore_cha = {
2186 .name = "cha",
2187 .num_counters = 4,
2188 .num_boxes = 38,
2189 .perf_ctr_bits = 48,
2190 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2191 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2192 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2193 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2194 .msr_offset = KNL_CHA_MSR_OFFSET,
2195 .num_shared_regs = 1,
2196 .constraints = knl_uncore_cha_constraints,
2197 .ops = &knl_uncore_cha_ops,
2198 .format_group = &knl_uncore_cha_format_group,
2199 };
2201 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2202 &format_attr_event2.attr,
2203 &format_attr_use_occ_ctr.attr,
2204 &format_attr_occ_sel.attr,
2205 &format_attr_edge.attr,
2206 &format_attr_tid_en.attr,
2207 &format_attr_inv.attr,
2208 &format_attr_thresh6.attr,
2209 &format_attr_occ_invert.attr,
2210 &format_attr_occ_edge_det.attr,
2211 NULL,
2212 };
2214 static const struct attribute_group knl_uncore_pcu_format_group = {
2215 .name = "format",
2216 .attrs = knl_uncore_pcu_formats_attr,
2217 };
2219 static struct intel_uncore_type knl_uncore_pcu = {
2220 .name = "pcu",
2221 .num_counters = 4,
2222 .num_boxes = 1,
2223 .perf_ctr_bits = 48,
2224 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2225 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2226 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2227 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2228 .ops = &snbep_uncore_msr_ops,
2229 .format_group = &knl_uncore_pcu_format_group,
2230 };
2232 static struct intel_uncore_type *knl_msr_uncores[] = {
2233 &knl_uncore_ubox,
2234 &knl_uncore_cha,
2235 &knl_uncore_pcu,
2236 NULL,
2237 };
2239 void knl_uncore_cpu_init(void)
2240 {
2241 uncore_msr_uncores = knl_msr_uncores;
2242 }
2244 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2245 {
2246 struct pci_dev *pdev = box->pci_dev;
2247 int box_ctl = uncore_pci_box_ctl(box);
2249 pci_write_config_dword(pdev, box_ctl, 0);
2250 }
2252 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2253 struct perf_event *event)
2254 {
2255 struct pci_dev *pdev = box->pci_dev;
2256 struct hw_perf_event *hwc = &event->hw;
2258 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2259 == UNCORE_FIXED_EVENT)
2260 pci_write_config_dword(pdev, hwc->config_base,
2261 hwc->config | KNL_PMON_FIXED_CTL_EN);
2262 else
2263 pci_write_config_dword(pdev, hwc->config_base,
2264 hwc->config | SNBEP_PMON_CTL_EN);
2265 }
2267 static struct intel_uncore_ops knl_uncore_imc_ops = {
2268 .init_box = snbep_uncore_pci_init_box,
2269 .disable_box = snbep_uncore_pci_disable_box,
2270 .enable_box = knl_uncore_imc_enable_box,
2271 .read_counter = snbep_uncore_pci_read_counter,
2272 .enable_event = knl_uncore_imc_enable_event,
2273 .disable_event = snbep_uncore_pci_disable_event,
2274 };
2276 static struct intel_uncore_type knl_uncore_imc_uclk = {
2277 .name = "imc_uclk",
2278 .num_counters = 4,
2279 .num_boxes = 2,
2280 .perf_ctr_bits = 48,
2281 .fixed_ctr_bits = 48,
2282 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2283 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2284 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2285 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2286 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2287 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2288 .ops = &knl_uncore_imc_ops,
2289 .format_group = &snbep_uncore_format_group,
2290 };
2292 static struct intel_uncore_type knl_uncore_imc_dclk = {
2293 .name = "imc",
2294 .num_counters = 4,
2295 .num_boxes = 6,
2296 .perf_ctr_bits = 48,
2297 .fixed_ctr_bits = 48,
2298 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2299 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2300 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2301 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2302 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2303 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2304 .ops = &knl_uncore_imc_ops,
2305 .format_group = &snbep_uncore_format_group,
2306 };
2308 static struct intel_uncore_type knl_uncore_edc_uclk = {
2309 .name = "edc_uclk",
2310 .num_counters = 4,
2311 .num_boxes = 8,
2312 .perf_ctr_bits = 48,
2313 .fixed_ctr_bits = 48,
2314 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2315 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2316 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2317 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2318 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2319 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2320 .ops = &knl_uncore_imc_ops,
2321 .format_group = &snbep_uncore_format_group,
2322 };
2324 static struct intel_uncore_type knl_uncore_edc_eclk = {
2325 .name = "edc_eclk",
2326 .num_counters = 4,
2327 .num_boxes = 8,
2328 .perf_ctr_bits = 48,
2329 .fixed_ctr_bits = 48,
2330 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2331 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2332 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2333 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2334 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2335 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2336 .ops = &knl_uncore_imc_ops,
2337 .format_group = &snbep_uncore_format_group,
2338 };
2340 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2341 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2342 EVENT_CONSTRAINT_END
2343 };
2345 static struct intel_uncore_type knl_uncore_m2pcie = {
2346 .name = "m2pcie",
2347 .num_counters = 4,
2348 .num_boxes = 1,
2349 .perf_ctr_bits = 48,
2350 .constraints = knl_uncore_m2pcie_constraints,
2351 SNBEP_UNCORE_PCI_COMMON_INIT(),
2352 };
2354 static struct attribute *knl_uncore_irp_formats_attr[] = {
2355 &format_attr_event.attr,
2356 &format_attr_umask.attr,
2357 &format_attr_qor.attr,
2358 &format_attr_edge.attr,
2359 &format_attr_inv.attr,
2360 &format_attr_thresh8.attr,
2361 NULL,
2362 };
2364 static const struct attribute_group knl_uncore_irp_format_group = {
2365 .name = "format",
2366 .attrs = knl_uncore_irp_formats_attr,
2367 };
2369 static struct intel_uncore_type knl_uncore_irp = {
2370 .name = "irp",
2371 .num_counters = 2,
2372 .num_boxes = 1,
2373 .perf_ctr_bits = 48,
2374 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2375 .event_ctl = SNBEP_PCI_PMON_CTL0,
2376 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2377 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2378 .ops = &snbep_uncore_pci_ops,
2379 .format_group = &knl_uncore_irp_format_group,
2380 };
2382 enum {
2383 KNL_PCI_UNCORE_MC_UCLK,
2384 KNL_PCI_UNCORE_MC_DCLK,
2385 KNL_PCI_UNCORE_EDC_UCLK,
2386 KNL_PCI_UNCORE_EDC_ECLK,
2387 KNL_PCI_UNCORE_M2PCIE,
2388 KNL_PCI_UNCORE_IRP,
2389 };
2391 static struct intel_uncore_type *knl_pci_uncores[] = {
2392 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2393 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2394 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2395 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2396 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2397 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2398 NULL,
2399 };
2401 /*
2402 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2403 * device type. Prior to KNL, each instance of a PMU device type had a unique
2404 * device ID.
2405 *
2406 * PCI Device ID Uncore PMU Devices
2407 * ----------------------------------
2408 * 0x7841 MC0 UClk, MC1 UClk
2409 * 0x7843 MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2410 * MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2411 * 0x7833 EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2412 * EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2413 * 0x7835 EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2414 * EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2415 * 0x7817 M2PCIe
2416 * 0x7814 IRP
2417 */
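/*
 * Because the device IDs are shared, each table entry below pins one PMU
 * instance to a fixed device/function. For example, the first entry,
 * UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0), says that
 * device 10, function 0 on the common bus is instance 0 of the MC UClk
 * PMU type.
 */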
2419 static const struct pci_device_id knl_uncore_pci_ids[] = {
2420 { /* MC0 UClk */
2421 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2422 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2423 },
2424 { /* MC1 UClk */
2425 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2426 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2427 },
2428 { /* MC0 DClk CH 0 */
2429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2430 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2431 },
2432 { /* MC0 DClk CH 1 */
2433 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2434 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2435 },
2436 { /* MC0 DClk CH 2 */
2437 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2438 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2439 },
2440 { /* MC1 DClk CH 0 */
2441 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2442 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2443 },
2444 { /* MC1 DClk CH 1 */
2445 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2446 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2447 },
2448 { /* MC1 DClk CH 2 */
2449 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2450 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2451 },
2452 { /* EDC0 UClk */
2453 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2454 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2455 },
2456 { /* EDC1 UClk */
2457 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2458 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2459 },
2460 { /* EDC2 UClk */
2461 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2462 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2463 },
2464 { /* EDC3 UClk */
2465 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2466 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2467 },
2468 { /* EDC4 UClk */
2469 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2470 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2471 },
2472 { /* EDC5 UClk */
2473 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2474 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2475 },
2476 { /* EDC6 UClk */
2477 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2478 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2479 },
2480 { /* EDC7 UClk */
2481 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2482 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2483 },
2484 { /* EDC0 EClk */
2485 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2486 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2487 },
2488 { /* EDC1 EClk */
2489 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2490 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2491 },
2492 { /* EDC2 EClk */
2493 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2494 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2495 },
2496 { /* EDC3 EClk */
2497 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2498 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2499 },
2500 { /* EDC4 EClk */
2501 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2502 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2503 },
2504 { /* EDC5 EClk */
2505 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2506 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2507 },
2508 { /* EDC6 EClk */
2509 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2510 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2511 },
2512 { /* EDC7 EClk */
2513 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2514 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2515 },
2516 { /* M2PCIe */
2517 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2518 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2519 },
2520 { /* IRP */
2521 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2522 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2523 },
2524 { /* end: all zeroes */ }
2525 };
2527 static struct pci_driver knl_uncore_pci_driver = {
2528 .name = "knl_uncore",
2529 .id_table = knl_uncore_pci_ids,
2530 };
2532 int knl_uncore_pci_init(void)
2533 {
2534 int ret;
2536 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2537 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2538 if (ret)
2539 return ret;
2540 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2541 if (ret)
2542 return ret;
2543 uncore_pci_uncores = knl_pci_uncores;
2544 uncore_pci_driver = &knl_uncore_pci_driver;
2545 return 0;
2546 }
2548 /* end of KNL uncore support */
2550 /* Haswell-EP uncore support */
2551 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2552 &format_attr_event.attr,
2553 &format_attr_umask.attr,
2554 &format_attr_edge.attr,
2555 &format_attr_inv.attr,
2556 &format_attr_thresh5.attr,
2557 &format_attr_filter_tid2.attr,
2558 &format_attr_filter_cid.attr,
2559 NULL,
2560 };
2562 static const struct attribute_group hswep_uncore_ubox_format_group = {
2563 .name = "format",
2564 .attrs = hswep_uncore_ubox_formats_attr,
2565 };
2567 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2568 {
2569 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2570 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2571 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2572 reg1->idx = 0;
2573 return 0;
2574 }
2576 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2577 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2578 .hw_config = hswep_ubox_hw_config,
2579 .get_constraint = uncore_get_constraint,
2580 .put_constraint = uncore_put_constraint,
2581 };
2583 static struct intel_uncore_type hswep_uncore_ubox = {
2584 .name = "ubox",
2585 .num_counters = 2,
2586 .num_boxes = 1,
2587 .perf_ctr_bits = 44,
2588 .fixed_ctr_bits = 48,
2589 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2590 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2591 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2592 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2593 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2594 .num_shared_regs = 1,
2595 .ops = &hswep_uncore_ubox_ops,
2596 .format_group = &hswep_uncore_ubox_format_group,
2597 };
2599 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2600 &format_attr_event.attr,
2601 &format_attr_umask.attr,
2602 &format_attr_edge.attr,
2603 &format_attr_tid_en.attr,
2604 &format_attr_thresh8.attr,
2605 &format_attr_filter_tid3.attr,
2606 &format_attr_filter_link2.attr,
2607 &format_attr_filter_state3.attr,
2608 &format_attr_filter_nid2.attr,
2609 &format_attr_filter_opc2.attr,
2610 &format_attr_filter_nc.attr,
2611 &format_attr_filter_c6.attr,
2612 &format_attr_filter_isoc.attr,
2613 NULL,
2614 };
2616 static const struct attribute_group hswep_uncore_cbox_format_group = {
2617 .name = "format",
2618 .attrs = hswep_uncore_cbox_formats_attr,
2619 };
2621 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2622 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2623 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2624 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2625 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2626 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2627 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2628 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2629 EVENT_CONSTRAINT_END
2630 };
2632 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2633 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2634 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2635 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2636 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2637 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2638 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2639 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2640 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2641 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2642 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2643 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2644 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2645 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2646 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2647 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2648 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2649 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2650 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2651 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2652 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2653 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2654 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2655 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2656 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2657 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2658 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2659 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2660 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2661 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2662 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2663 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2664 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2665 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2666 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2667 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2668 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2669 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2670 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2671 EVENT_EXTRA_END
2672 };
2674 static u64 hswep_cbox_filter_mask(int fields)
2675 {
2676 u64 mask = 0;
2677 if (fields & 0x1)
2678 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2679 if (fields & 0x2)
2680 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2681 if (fields & 0x4)
2682 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2683 if (fields & 0x8)
2684 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2685 if (fields & 0x10) {
2686 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2687 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2688 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2689 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2690 }
2692 return mask;
2693 }
2694 static struct event_constraint *
2695 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2696 {
2697 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2698 }
2700 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2701 {
2702 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2703 struct extra_reg *er;
2704 int idx = 0;
2706 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2707 if (er->event != (event->hw.config & er->config_mask))
2708 continue;
2709 idx |= er->idx;
2710 }
2712 if (idx) {
2713 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2714 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2715 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2716 reg1->idx = idx;
2717 }
2718 return 0;
2719 }
2721 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2722 struct perf_event *event)
2723 {
2724 struct hw_perf_event *hwc = &event->hw;
2725 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2727 if (reg1->idx != EXTRA_REG_NONE) {
2728 u64 filter = uncore_shared_reg_config(box, 0);
2729 wrmsrl(reg1->reg, filter & 0xffffffff);
2730 wrmsrl(reg1->reg + 1, filter >> 32);
2731 }
2733 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2734 }
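/*
 * Unlike IvyBridge-EP (see ivbep_cbox_enable_event() above, which writes
 * reg and reg + 6), Haswell-EP keeps FILTER0 and FILTER1 at adjacent MSR
 * addresses, hence the reg + 1 write for the upper 32 bits.
 */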
2736 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2737 .init_box = snbep_uncore_msr_init_box,
2738 .disable_box = snbep_uncore_msr_disable_box,
2739 .enable_box = snbep_uncore_msr_enable_box,
2740 .disable_event = snbep_uncore_msr_disable_event,
2741 .enable_event = hswep_cbox_enable_event,
2742 .read_counter = uncore_msr_read_counter,
2743 .hw_config = hswep_cbox_hw_config,
2744 .get_constraint = hswep_cbox_get_constraint,
2745 .put_constraint = snbep_cbox_put_constraint,
2746 };
2748 static struct intel_uncore_type hswep_uncore_cbox = {
2749 .name = "cbox",
2750 .num_counters = 4,
2751 .num_boxes = 18,
2752 .perf_ctr_bits = 48,
2753 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2754 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2755 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2756 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2757 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2758 .num_shared_regs = 1,
2759 .constraints = hswep_uncore_cbox_constraints,
2760 .ops = &hswep_uncore_cbox_ops,
2761 .format_group = &hswep_uncore_cbox_format_group,
2762 };
2764 /*
2765 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2766 */
2767 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2768 {
2769 unsigned msr = uncore_msr_box_ctl(box);
2771 if (msr) {
2772 u64 init = SNBEP_PMON_BOX_CTL_INT;
2773 u64 flags = 0;
2774 int i;
2776 for_each_set_bit(i, (unsigned long *)&init, 64) {
2777 flags |= (1ULL << i);
2778 wrmsrl(msr, flags);
2779 }
2780 }
2781 }
2783 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2784 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2785 .init_box = hswep_uncore_sbox_msr_init_box,
2786 };
2788 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2789 &format_attr_event.attr,
2790 &format_attr_umask.attr,
2791 &format_attr_edge.attr,
2792 &format_attr_tid_en.attr,
2793 &format_attr_inv.attr,
2794 &format_attr_thresh8.attr,
2795 NULL,
2796 };
2798 static const struct attribute_group hswep_uncore_sbox_format_group = {
2799 .name = "format",
2800 .attrs = hswep_uncore_sbox_formats_attr,
2801 };
2803 static struct intel_uncore_type hswep_uncore_sbox = {
2804 .name = "sbox",
2805 .num_counters = 4,
2806 .num_boxes = 4,
2807 .perf_ctr_bits = 44,
2808 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2809 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2810 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2811 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2812 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2813 .ops = &hswep_uncore_sbox_msr_ops,
2814 .format_group = &hswep_uncore_sbox_format_group,
2815 };
2817 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2818 {
2819 struct hw_perf_event *hwc = &event->hw;
2820 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2821 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2823 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2824 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2825 reg1->idx = ev_sel - 0xb;
2826 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2827 }
2828 return 0;
2829 }
2831 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2832 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2833 .hw_config = hswep_pcu_hw_config,
2834 .get_constraint = snbep_pcu_get_constraint,
2835 .put_constraint = snbep_pcu_put_constraint,
2836 };
2838 static struct intel_uncore_type hswep_uncore_pcu = {
2839 .name = "pcu",
2840 .num_counters = 4,
2841 .num_boxes = 1,
2842 .perf_ctr_bits = 48,
2843 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2844 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2845 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2846 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2847 .num_shared_regs = 1,
2848 .ops = &hswep_uncore_pcu_ops,
2849 .format_group = &snbep_uncore_pcu_format_group,
2850 };
2852 static struct intel_uncore_type *hswep_msr_uncores[] = {
2853 &hswep_uncore_ubox,
2854 &hswep_uncore_cbox,
2855 &hswep_uncore_sbox,
2856 &hswep_uncore_pcu,
2857 NULL,
2858 };
2860 void hswep_uncore_cpu_init(void)
2861 {
2862 int pkg = boot_cpu_data.logical_proc_id;
2864 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2865 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2867 /* Detect 6-8 core systems with only two SBOXes */
2868 if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
2869 u32 capid4;
2871 pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
2872 0x94, &capid4);
2873 if (((capid4 >> 6) & 0x3) == 0)
2874 hswep_uncore_sbox.num_boxes = 2;
2875 }
2877 uncore_msr_uncores = hswep_msr_uncores;
2878 }
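/*
 * CAPID4 is read at offset 0x94 through the PCU.3 PCI function (device
 * 0x2fc0 in the id table below). Bits 7:6 presumably encode the fused-off
 * SBOX configuration, with the value 0 meaning only two SBOXes are usable.
 */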
2880 static struct intel_uncore_type hswep_uncore_ha = {
2881 .name = "ha",
2882 .num_counters = 4,
2883 .num_boxes = 2,
2884 .perf_ctr_bits = 48,
2885 SNBEP_UNCORE_PCI_COMMON_INIT(),
2886 };
2888 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2889 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
2890 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
2891 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2892 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2893 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2894 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2895 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2896 { /* end: all zeroes */ },
2897 };
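/*
 * The cas_count scale is 64 / 2^20: each CAS event transfers one 64-byte
 * cache line and 64 / 1048576 = 6.103515625e-5, so a raw count times the
 * scale yields MiB. A command along the lines of
 *   perf stat -a -e uncore_imc_0/cas_count_read/
 * (exact PMU name as listed under /sys/bus/event_source/devices) reports
 * the result already scaled to MiB.
 */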
2899 static struct intel_uncore_type hswep_uncore_imc = {
2900 .name = "imc",
2901 .num_counters = 4,
2902 .num_boxes = 8,
2903 .perf_ctr_bits = 48,
2904 .fixed_ctr_bits = 48,
2905 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2906 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2907 .event_descs = hswep_uncore_imc_events,
2908 SNBEP_UNCORE_PCI_COMMON_INIT(),
2909 };
2911 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2913 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2914 {
2915 struct pci_dev *pdev = box->pci_dev;
2916 struct hw_perf_event *hwc = &event->hw;
2917 u64 count = 0;
2919 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2920 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2922 return count;
2923 }
2925 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2926 .init_box = snbep_uncore_pci_init_box,
2927 .disable_box = snbep_uncore_pci_disable_box,
2928 .enable_box = snbep_uncore_pci_enable_box,
2929 .disable_event = ivbep_uncore_irp_disable_event,
2930 .enable_event = ivbep_uncore_irp_enable_event,
2931 .read_counter = hswep_uncore_irp_read_counter,
2932 };
2934 static struct intel_uncore_type hswep_uncore_irp = {
2935 .name = "irp",
2936 .num_counters = 4,
2937 .num_boxes = 1,
2938 .perf_ctr_bits = 48,
2939 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2940 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2941 .ops = &hswep_uncore_irp_ops,
2942 .format_group = &snbep_uncore_format_group,
2943 };
2945 static struct intel_uncore_type hswep_uncore_qpi = {
2946 .name = "qpi",
2947 .num_counters = 4,
2948 .num_boxes = 3,
2949 .perf_ctr_bits = 48,
2950 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2951 .event_ctl = SNBEP_PCI_PMON_CTL0,
2952 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2953 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
2954 .num_shared_regs = 1,
2955 .ops = &snbep_uncore_qpi_ops,
2956 .format_group = &snbep_uncore_qpi_format_group,
2957 };
2959 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2960 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2961 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2962 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2963 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2964 UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2965 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2966 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2967 UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2968 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2969 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2970 UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2971 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2972 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2973 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2974 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2975 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2976 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2977 UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2978 EVENT_CONSTRAINT_END
2979 };
2981 static struct intel_uncore_type hswep_uncore_r2pcie = {
2982 .name = "r2pcie",
2983 .num_counters = 4,
2984 .num_boxes = 1,
2985 .perf_ctr_bits = 48,
2986 .constraints = hswep_uncore_r2pcie_constraints,
2987 SNBEP_UNCORE_PCI_COMMON_INIT(),
2988 };
2990 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
2991 UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
2992 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
2993 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
2994 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
2995 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
2996 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
2997 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2998 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2999 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3000 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3001 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3002 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3003 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3004 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3005 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3006 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3007 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3008 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3009 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3010 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3011 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3012 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3013 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3014 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3015 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3016 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3017 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3018 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3019 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3020 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3021 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3022 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3023 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3024 EVENT_CONSTRAINT_END
3025 };
3027 static struct intel_uncore_type hswep_uncore_r3qpi = {
3028 .name = "r3qpi",
3029 .num_counters = 3,
3030 .num_boxes = 3,
3031 .perf_ctr_bits = 44,
3032 .constraints = hswep_uncore_r3qpi_constraints,
3033 SNBEP_UNCORE_PCI_COMMON_INIT(),
3034 };
3036 enum {
3037 HSWEP_PCI_UNCORE_HA,
3038 HSWEP_PCI_UNCORE_IMC,
3039 HSWEP_PCI_UNCORE_IRP,
3040 HSWEP_PCI_UNCORE_QPI,
3041 HSWEP_PCI_UNCORE_R2PCIE,
3042 HSWEP_PCI_UNCORE_R3QPI,
3043 };
3045 static struct intel_uncore_type *hswep_pci_uncores[] = {
3046 [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
3047 [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
3048 [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
3049 [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
3050 [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
3051 [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
3052 NULL,
3053 };
3055 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3056 { /* Home Agent 0 */
3057 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3058 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3059 },
3060 { /* Home Agent 1 */
3061 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3062 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3063 },
3064 { /* MC0 Channel 0 */
3065 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3066 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3067 },
3068 { /* MC0 Channel 1 */
3069 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3070 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3071 },
3072 { /* MC0 Channel 2 */
3073 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3074 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3075 },
3076 { /* MC0 Channel 3 */
3077 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3078 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3079 },
3080 { /* MC1 Channel 0 */
3081 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3082 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3083 },
3084 { /* MC1 Channel 1 */
3085 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3086 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3087 },
3088 { /* MC1 Channel 2 */
3089 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3090 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3091 },
3092 { /* MC1 Channel 3 */
3093 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3094 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3095 },
3096 { /* IRP */
3097 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3098 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3099 },
3100 { /* QPI0 Port 0 */
3101 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3102 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3103 },
3104 { /* QPI0 Port 1 */
3105 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3106 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3107 },
3108 { /* QPI1 Port 2 */
3109 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3110 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3111 },
3112 { /* R2PCIe */
3113 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3114 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3115 },
3116 { /* R3QPI0 Link 0 */
3117 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3118 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3119 },
3120 { /* R3QPI0 Link 1 */
3121 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3122 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3123 },
3124 { /* R3QPI1 Link 2 */
3125 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3126 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3127 },
3128 { /* QPI Port 0 filter */
3129 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3130 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3131 SNBEP_PCI_QPI_PORT0_FILTER),
3132 },
3133 { /* QPI Port 1 filter */
3134 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3135 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3136 SNBEP_PCI_QPI_PORT1_FILTER),
3137 },
3138 { /* PCU.3 (for Capability registers) */
3139 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
3140 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3141 HSWEP_PCI_PCU_3),
3142 },
3143 { /* end: all zeroes */ }
3144 };
3146 static struct pci_driver hswep_uncore_pci_driver = {
3147 .name = "hswep_uncore",
3148 .id_table = hswep_uncore_pci_ids,
3149 };
3151 int hswep_uncore_pci_init(void)
3152 {
3153 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3154 if (ret)
3155 return ret;
3156 uncore_pci_uncores = hswep_pci_uncores;
3157 uncore_pci_driver = &hswep_uncore_pci_driver;
3158 return 0;
3159 }
3160 /* end of Haswell-EP uncore support */
3162 /* BDX uncore support */
3164 static struct intel_uncore_type bdx_uncore_ubox = {
3165 .name = "ubox",
3166 .num_counters = 2,
3167 .num_boxes = 1,
3168 .perf_ctr_bits = 48,
3169 .fixed_ctr_bits = 48,
3170 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3171 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3172 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3173 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3174 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3175 .num_shared_regs = 1,
3176 .ops = &ivbep_uncore_msr_ops,
3177 .format_group = &ivbep_uncore_ubox_format_group,
3178 };
3180 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3181 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3182 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3183 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3184 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3185 EVENT_CONSTRAINT_END
3186 };
3188 static struct intel_uncore_type bdx_uncore_cbox = {
3189 .name = "cbox",
3190 .num_counters = 4,
3191 .num_boxes = 24,
3192 .perf_ctr_bits = 48,
3193 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3194 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3195 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3196 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3197 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3198 .num_shared_regs = 1,
3199 .constraints = bdx_uncore_cbox_constraints,
3200 .ops = &hswep_uncore_cbox_ops,
3201 .format_group = &hswep_uncore_cbox_format_group,
3202 };
3204 static struct intel_uncore_type bdx_uncore_sbox = {
3205 .name = "sbox",
3206 .num_counters = 4,
3207 .num_boxes = 4,
3208 .perf_ctr_bits = 48,
3209 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
3210 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
3211 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3212 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
3213 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
3214 .ops = &hswep_uncore_sbox_msr_ops,
3215 .format_group = &hswep_uncore_sbox_format_group,
3216 };
3218 #define BDX_MSR_UNCORE_SBOX 3
3220 static struct intel_uncore_type *bdx_msr_uncores[] = {
3221 &bdx_uncore_ubox,
3222 &bdx_uncore_cbox,
3223 &hswep_uncore_pcu,
3224 &bdx_uncore_sbox,
3225 NULL,
3226 };
3228 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3229 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3230 EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3231 EVENT_CONSTRAINT_END
3232 };
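/*
 * EVENT_CONSTRAINT(0x80, 0xe, 0x80) reads as: any PCU event whose config
 * has bit 7 (use_occ_ctr) set is restricted to the counter mask 0xe,
 * i.e. counters 1-3, which keeps it off counter 0 as the comment above
 * describes.
 */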
3234 void bdx_uncore_cpu_init(void)
3235 {
3236 int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
3238 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3239 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3240 uncore_msr_uncores = bdx_msr_uncores;
3242 /* BDX-DE doesn't have SBOX */
3243 if (boot_cpu_data.x86_model == 86) {
3244 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3245 /* Detect systems with no SBOXes */
3246 } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3247 struct pci_dev *pdev;
3248 u32 capid4;
3250 pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3251 pci_read_config_dword(pdev, 0x94, &capid4);
3252 if (((capid4 >> 6) & 0x3) == 0)
3253 bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3254 }
3255 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3256 }
3258 static struct intel_uncore_type bdx_uncore_ha = {
3259 .name = "ha",
3260 .num_counters = 4,
3261 .num_boxes = 2,
3262 .perf_ctr_bits = 48,
3263 SNBEP_UNCORE_PCI_COMMON_INIT(),
3264 };
3266 static struct intel_uncore_type bdx_uncore_imc = {
3267 .name = "imc",
3268 .num_counters = 4,
3269 .num_boxes = 8,
3270 .perf_ctr_bits = 48,
3271 .fixed_ctr_bits = 48,
3272 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3273 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3274 .event_descs = hswep_uncore_imc_events,
3275 SNBEP_UNCORE_PCI_COMMON_INIT(),
3276 };
3278 static struct intel_uncore_type bdx_uncore_irp = {
3279 .name = "irp",
3280 .num_counters = 4,
3281 .num_boxes = 1,
3282 .perf_ctr_bits = 48,
3283 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3284 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3285 .ops = &hswep_uncore_irp_ops,
3286 .format_group = &snbep_uncore_format_group,
3287 };
3289 static struct intel_uncore_type bdx_uncore_qpi = {
3290 .name = "qpi",
3291 .num_counters = 4,
3292 .num_boxes = 3,
3293 .perf_ctr_bits = 48,
3294 .perf_ctr = SNBEP_PCI_PMON_CTR0,
3295 .event_ctl = SNBEP_PCI_PMON_CTL0,
3296 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3297 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
3298 .num_shared_regs = 1,
3299 .ops = &snbep_uncore_qpi_ops,
3300 .format_group = &snbep_uncore_qpi_format_group,
3301 };
3303 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3304 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3305 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3306 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3307 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3308 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3309 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3310 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3311 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3312 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3313 EVENT_CONSTRAINT_END
3314 };
3316 static struct intel_uncore_type bdx_uncore_r2pcie = {
3317 .name = "r2pcie",
3318 .num_counters = 4,
3319 .num_boxes = 1,
3320 .perf_ctr_bits = 48,
3321 .constraints = bdx_uncore_r2pcie_constraints,
3322 SNBEP_UNCORE_PCI_COMMON_INIT(),
3323 };
3325 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3326 UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3327 UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3328 UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3329 UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3330 UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3331 UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3332 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3333 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3334 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3335 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3336 UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3337 UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3338 UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3339 UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3340 UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3341 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3342 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3343 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3344 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3345 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3346 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3347 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3348 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3349 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3350 UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3351 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3352 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3353 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3354 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3355 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3356 EVENT_CONSTRAINT_END
3357 };
3359 static struct intel_uncore_type bdx_uncore_r3qpi = {
3360 .name = "r3qpi",
3361 .num_counters = 3,
3362 .num_boxes = 3,
3363 .perf_ctr_bits = 48,
3364 .constraints = bdx_uncore_r3qpi_constraints,
3365 SNBEP_UNCORE_PCI_COMMON_INIT(),
3366 };
3368 enum {
3369 BDX_PCI_UNCORE_HA,
3370 BDX_PCI_UNCORE_IMC,
3371 BDX_PCI_UNCORE_IRP,
3372 BDX_PCI_UNCORE_QPI,
3373 BDX_PCI_UNCORE_R2PCIE,
3374 BDX_PCI_UNCORE_R3QPI,
3375 };
3377 static struct intel_uncore_type *bdx_pci_uncores[] = {
3378 [BDX_PCI_UNCORE_HA] = &bdx_uncore_ha,
3379 [BDX_PCI_UNCORE_IMC] = &bdx_uncore_imc,
3380 [BDX_PCI_UNCORE_IRP] = &bdx_uncore_irp,
3381 [BDX_PCI_UNCORE_QPI] = &bdx_uncore_qpi,
3382 [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3383 [BDX_PCI_UNCORE_R3QPI] = &bdx_uncore_r3qpi,
3384 NULL,
3385 };
3387 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3388 { /* Home Agent 0 */
3389 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3390 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3391 },
3392 { /* Home Agent 1 */
3393 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3394 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3395 },
3396 { /* MC0 Channel 0 */
3397 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3398 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3399 },
3400 { /* MC0 Channel 1 */
3401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3402 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3403 },
3404 { /* MC0 Channel 2 */
3405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3406 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3407 },
3408 { /* MC0 Channel 3 */
3409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3410 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3411 },
3412 { /* MC1 Channel 0 */
3413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3414 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3415 },
3416 { /* MC1 Channel 1 */
3417 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3418 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3419 },
3420 { /* MC1 Channel 2 */
3421 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3422 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3423 },
3424 { /* MC1 Channel 3 */
3425 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3426 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3427 },
3428 { /* IRP */
3429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3430 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3431 },
3432 { /* QPI0 Port 0 */
3433 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3434 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3435 },
3436 { /* QPI0 Port 1 */
3437 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3438 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3439 },
3440 { /* QPI1 Port 2 */
3441 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3442 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3443 },
3444 { /* R2PCIe */
3445 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3446 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3447 },
3448 { /* R3QPI0 Link 0 */
3449 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3450 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3451 },
3452 { /* R3QPI0 Link 1 */
3453 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3454 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3455 },
3456 { /* R3QPI1 Link 2 */
3457 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3458 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3459 },
3460 { /* QPI Port 0 filter */
3461 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3462 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3463 SNBEP_PCI_QPI_PORT0_FILTER),
3464 },
3465 { /* QPI Port 1 filter */
3466 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3467 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3468 SNBEP_PCI_QPI_PORT1_FILTER),
3469 },
3470 { /* QPI Port 2 filter */
3471 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3472 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3473 BDX_PCI_QPI_PORT2_FILTER),
3474 },
3475 { /* PCU.3 (for Capability registers) */
3476 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
3477 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3478 HSWEP_PCI_PCU_3),
3479 },
3480 { /* end: all zeroes */ }
3481 };
3483 static struct pci_driver bdx_uncore_pci_driver = {
3484 .name = "bdx_uncore",
3485 .id_table = bdx_uncore_pci_ids,
3486 };
3488 int bdx_uncore_pci_init(void)
3489 {
3490 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3492 if (ret)
3493 return ret;
3494 uncore_pci_uncores = bdx_pci_uncores;
3495 uncore_pci_driver = &bdx_uncore_pci_driver;
3496 return 0;
3497 }
3499 /* end of BDX uncore support */
3501 /* SKX uncore support */
3503 static struct intel_uncore_type skx_uncore_ubox = {
3504 .name = "ubox",
3505 .num_counters = 2,
3506 .num_boxes = 1,
3507 .perf_ctr_bits = 48,
3508 .fixed_ctr_bits = 48,
3509 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
3510 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
3511 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3512 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3513 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3514 .ops = &ivbep_uncore_msr_ops,
3515 .format_group = &ivbep_uncore_ubox_format_group,
3516 };
3518 static struct attribute *skx_uncore_cha_formats_attr[] = {
3519 &format_attr_event.attr,
3520 &format_attr_umask.attr,
3521 &format_attr_edge.attr,
3522 &format_attr_tid_en.attr,
3523 &format_attr_inv.attr,
3524 &format_attr_thresh8.attr,
3525 &format_attr_filter_tid4.attr,
3526 &format_attr_filter_state5.attr,
3527 &format_attr_filter_rem.attr,
3528 &format_attr_filter_loc.attr,
3529 &format_attr_filter_nm.attr,
3530 &format_attr_filter_all_op.attr,
3531 &format_attr_filter_not_nm.attr,
3532 &format_attr_filter_opc_0.attr,
3533 &format_attr_filter_opc_1.attr,
3534 &format_attr_filter_nc.attr,
3535 &format_attr_filter_isoc.attr,
3536 NULL,
3537 };
3539 static const struct attribute_group skx_uncore_chabox_format_group = {
3540 .name = "format",
3541 .attrs = skx_uncore_cha_formats_attr,
3542 };
3544 static struct event_constraint skx_uncore_chabox_constraints[] = {
3545 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3546 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3547 EVENT_CONSTRAINT_END
3548 };
3550 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3551 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3552 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3553 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3554 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3555 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3556 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3557 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3558 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3559 SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3560 EVENT_EXTRA_END
3561 };
3563 static u64 skx_cha_filter_mask(int fields)
3564 {
3565 u64 mask = 0;
3567 if (fields & 0x1)
3568 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3569 if (fields & 0x2)
3570 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3571 if (fields & 0x4)
3572 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3573 if (fields & 0x8) {
3574 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3575 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3576 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3577 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3578 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3579 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3580 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3581 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3582 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3583 }
3584 return mask;
3585 }
3587 static struct event_constraint *
3588 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3589 {
3590 return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3591 }
3593 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3594 {
3595 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3596 struct extra_reg *er;
3597 int idx = 0;
3599 for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3600 if (er->event != (event->hw.config & er->config_mask))
3601 continue;
3602 idx |= er->idx;
3603 }
3605 if (idx) {
3606 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3607 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3608 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3609 reg1->idx = idx;
3610 }
3611 return 0;
3612 }
3614 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3615 /* There is no frz_en for chabox ctl */
3616 .init_box = ivbep_uncore_msr_init_box,
3617 .disable_box = snbep_uncore_msr_disable_box,
3618 .enable_box = snbep_uncore_msr_enable_box,
3619 .disable_event = snbep_uncore_msr_disable_event,
3620 .enable_event = hswep_cbox_enable_event,
3621 .read_counter = uncore_msr_read_counter,
3622 .hw_config = skx_cha_hw_config,
3623 .get_constraint = skx_cha_get_constraint,
3624 .put_constraint = snbep_cbox_put_constraint,
3625 };
3627 static struct intel_uncore_type skx_uncore_chabox = {
3628 .name = "cha",
3629 .num_counters = 4,
3630 .perf_ctr_bits = 48,
3631 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
3632 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
3633 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3634 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
3635 .msr_offset = HSWEP_CBO_MSR_OFFSET,
3636 .num_shared_regs = 1,
3637 .constraints = skx_uncore_chabox_constraints,
3638 .ops = &skx_uncore_chabox_ops,
3639 .format_group = &skx_uncore_chabox_format_group,
3640 };
3642 static struct attribute *skx_uncore_iio_formats_attr[] = {
3643 &format_attr_event.attr,
3644 &format_attr_umask.attr,
3645 &format_attr_edge.attr,
3646 &format_attr_inv.attr,
3647 &format_attr_thresh9.attr,
3648 &format_attr_ch_mask.attr,
3649 &format_attr_fc_mask.attr,
3650 NULL,
3651 };
3653 static const struct attribute_group skx_uncore_iio_format_group = {
3654 .name = "format",
3655 .attrs = skx_uncore_iio_formats_attr,
3656 };
3658 static struct event_constraint skx_uncore_iio_constraints[] = {
3659 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3660 UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3661 UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3662 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3663 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3664 UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3665 EVENT_CONSTRAINT_END
3666 };
3668 static void skx_iio_enable_event(struct intel_uncore_box *box,
3669 struct perf_event *event)
3670 {
3671 struct hw_perf_event *hwc = &event->hw;
3673 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3674 }
3676 static struct intel_uncore_ops skx_uncore_iio_ops = {
3677 .init_box = ivbep_uncore_msr_init_box,
3678 .disable_box = snbep_uncore_msr_disable_box,
3679 .enable_box = snbep_uncore_msr_enable_box,
3680 .disable_event = snbep_uncore_msr_disable_event,
3681 .enable_event = skx_iio_enable_event,
3682 .read_counter = uncore_msr_read_counter,
3683 };
3685 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3686 {
3687 return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
3688 }
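/*
 * SKX_MSR_CPU_BUS_NUMBER packs one PCI root bus number per IIO stack,
 * one byte each (BUS_NUM_STRIDE is the 8-bit stride), so the bus for
 * stack N of a die is obtained by shifting the die's cached MSR value
 * right by N * BUS_NUM_STRIDE.
 */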
3690 static umode_t
3691 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3692 {
3693 struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3695 /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
3696 return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
3697 }
3699 static ssize_t skx_iio_mapping_show(struct device *dev,
3700 struct device_attribute *attr, char *buf)
3701 {
3702 struct pci_bus *bus = pci_find_next_bus(NULL);
3703 struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
3704 struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3705 long die = (long)ea->var;
3707 /*
3708 * Current implementation is for single segment configuration hence it's
3709 * safe to take the segment value from the first available root bus.
3710 */
3711 return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
3712 skx_iio_stack(uncore_pmu, die));
3713 }
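/*
 * The output format is "segment:bus"; a die0 mapping file that reads,
 * say, "0000:17" would mean this IIO PMU monitors the stack behind PCI
 * root bus 0x17 in segment 0 (the value varies by platform).
 */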
3715 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3716 {
3717 u64 msr_value;
3719 if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3720 !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3721 return -ENXIO;
3723 *topology = msr_value;
3725 return 0;
3726 }
3728 static int die_to_cpu(int die)
3730 int res = 0, cpu, current_die;
3732 * Hold cpus_read_lock() so that no CPU goes offline while we walk
3733 * cpu_online_mask.
3736 for_each_online_cpu(cpu) {
3737 current_die = topology_logical_die_id(cpu);
3738 if (current_die == die) {
3747 static int skx_iio_get_topology(struct intel_uncore_type *type)
3750 struct pci_bus *bus = NULL;
3753 * Verified for single-segment environments only; disabled for
3754 * multi-segment topologies for now, except for VMD domains.
3755 * VMD domains start at 0x10000 so they do not clash with ACPI _SEG domains.
3757 while ((bus = pci_find_next_bus(bus))
3758 && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
3763 type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
3764 if (!type->topology)
3767 for (i = 0; i < uncore_max_dies(); i++) {
3768 ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
3770 kfree(type->topology);
3771 type->topology = NULL;
3779 static struct attribute_group skx_iio_mapping_group = {
3780 .is_visible = skx_iio_mapping_visible,
3783 static const struct attribute_group *skx_iio_attr_update[] = {
3784 &skx_iio_mapping_group,
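/*
 * Build one "die<N>" extended attribute per die, each reporting the
 * segment:bus of this PMU's IIO stack on that die, and attach them to
 * skx_iio_mapping_group so the mapping shows up in sysfs.
 */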
3788 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3793 struct attribute **attrs = NULL;
3794 struct dev_ext_attribute *eas = NULL;
3796 ret = skx_iio_get_topology(type);
3798 goto clear_attr_update;
3802 /* One more for NULL. */
3803 attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3807 eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3811 for (die = 0; die < uncore_max_dies(); die++) {
3812 sprintf(buf, "die%ld", die);
3813 sysfs_attr_init(&eas[die].attr.attr);
3814 eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3815 if (!eas[die].attr.attr.name)
3817 eas[die].attr.attr.mode = 0444;
3818 eas[die].attr.show = skx_iio_mapping_show;
3819 eas[die].attr.store = NULL;
3820 eas[die].var = (void *)die;
3821 attrs[die] = &eas[die].attr.attr;
3823 skx_iio_mapping_group.attrs = attrs;
3827 for (; die >= 0; die--)
3828 kfree(eas[die].attr.attr.name);
3831 kfree(type->topology);
3833 type->attr_update = NULL;
3837 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3839 struct attribute **attr = skx_iio_mapping_group.attrs;
3844 for (; *attr; attr++)
3845 kfree((*attr)->name);
3846 kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3847 kfree(skx_iio_mapping_group.attrs);
3848 skx_iio_mapping_group.attrs = NULL;
3849 kfree(type->topology);
3852 static struct intel_uncore_type skx_uncore_iio = {
3856 .perf_ctr_bits = 48,
3857 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3858 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
3859 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
3860 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3861 .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL,
3862 .msr_offset = SKX_IIO_MSR_OFFSET,
3863 .constraints = skx_uncore_iio_constraints,
3864 .ops = &skx_uncore_iio_ops,
3865 .format_group = &skx_uncore_iio_format_group,
3866 .attr_update = skx_iio_attr_update,
3867 .set_mapping = skx_iio_set_mapping,
3868 .cleanup_mapping = skx_iio_cleanup_mapping,
3871 enum perf_uncore_iio_freerunning_type_id {
3872 SKX_IIO_MSR_IOCLK = 0,
3874 SKX_IIO_MSR_UTIL = 2,
3876 SKX_IIO_FREERUNNING_TYPE_MAX,
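/*
 * Each entry below follows the struct freerunning_counters layout (a
 * reading of the initializers here, not a normative definition):
 * { first counter MSR, stride between counters, stride between boxes,
 *   counters of this type, counter width in bits }.
 */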
3880 static struct freerunning_counters skx_iio_freerunning[] = {
3881 [SKX_IIO_MSR_IOCLK] = { 0xa45, 0x1, 0x20, 1, 36 },
3882 [SKX_IIO_MSR_BW] = { 0xb00, 0x1, 0x10, 8, 36 },
3883 [SKX_IIO_MSR_UTIL] = { 0xb08, 0x1, 0x10, 8, 36 },
3886 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3887 /* Free-Running IO CLOCKS Counter */
3888 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
3889 /* Free-Running IIO BANDWIDTH Counters */
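/* The 3.814697266e-6 scale below is 4B / 2^20: each count is evidently one 4-byte transfer, reported in MiB. */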
3890 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
3891 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
3892 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
3893 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
3894 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
3895 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
3896 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
3897 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
3898 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
3899 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
3900 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
3901 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
3902 INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x24"),
3903 INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
3904 INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
3905 INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x25"),
3906 INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
3907 INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
3908 INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x26"),
3909 INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
3910 INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
3911 INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x27"),
3912 INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
3913 INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
3914 /* Free-running IIO UTILIZATION Counters */
3915 INTEL_UNCORE_EVENT_DESC(util_in_port0, "event=0xff,umask=0x30"),
3916 INTEL_UNCORE_EVENT_DESC(util_out_port0, "event=0xff,umask=0x31"),
3917 INTEL_UNCORE_EVENT_DESC(util_in_port1, "event=0xff,umask=0x32"),
3918 INTEL_UNCORE_EVENT_DESC(util_out_port1, "event=0xff,umask=0x33"),
3919 INTEL_UNCORE_EVENT_DESC(util_in_port2, "event=0xff,umask=0x34"),
3920 INTEL_UNCORE_EVENT_DESC(util_out_port2, "event=0xff,umask=0x35"),
3921 INTEL_UNCORE_EVENT_DESC(util_in_port3, "event=0xff,umask=0x36"),
3922 INTEL_UNCORE_EVENT_DESC(util_out_port3, "event=0xff,umask=0x37"),
3923 { /* end: all zeroes */ },
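/*
 * The named descriptors above are consumed by the perf tool; an
 * illustrative invocation (the PMU instance name is hypothetical):
 *   perf stat -e uncore_iio_free_running_0/bw_in_port0/ -a sleep 1
 */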
3926 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3927 .read_counter = uncore_msr_read_counter,
3928 .hw_config = uncore_freerunning_hw_config,
3931 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3932 &format_attr_event.attr,
3933 &format_attr_umask.attr,
3937 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3939 .attrs = skx_uncore_iio_freerunning_formats_attr,
3942 static struct intel_uncore_type skx_uncore_iio_free_running = {
3943 .name = "iio_free_running",
3946 .num_freerunning_types = SKX_IIO_FREERUNNING_TYPE_MAX,
3947 .freerunning = skx_iio_freerunning,
3948 .ops = &skx_uncore_iio_freerunning_ops,
3949 .event_descs = skx_uncore_iio_freerunning_events,
3950 .format_group = &skx_uncore_iio_freerunning_format_group,
3953 static struct attribute *skx_uncore_formats_attr[] = {
3954 &format_attr_event.attr,
3955 &format_attr_umask.attr,
3956 &format_attr_edge.attr,
3957 &format_attr_inv.attr,
3958 &format_attr_thresh8.attr,
3962 static const struct attribute_group skx_uncore_format_group = {
3964 .attrs = skx_uncore_formats_attr,
3967 static struct intel_uncore_type skx_uncore_irp = {
3971 .perf_ctr_bits = 48,
3972 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3973 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
3974 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3975 .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL,
3976 .msr_offset = SKX_IRP_MSR_OFFSET,
3977 .ops = &skx_uncore_iio_ops,
3978 .format_group = &skx_uncore_format_group,
3981 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3982 &format_attr_event.attr,
3983 &format_attr_umask.attr,
3984 &format_attr_edge.attr,
3985 &format_attr_inv.attr,
3986 &format_attr_thresh8.attr,
3987 &format_attr_occ_invert.attr,
3988 &format_attr_occ_edge_det.attr,
3989 &format_attr_filter_band0.attr,
3990 &format_attr_filter_band1.attr,
3991 &format_attr_filter_band2.attr,
3992 &format_attr_filter_band3.attr,
3996 static struct attribute_group skx_uncore_pcu_format_group = {
3998 .attrs = skx_uncore_pcu_formats_attr,
4001 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4002 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4003 .hw_config = hswep_pcu_hw_config,
4004 .get_constraint = snbep_pcu_get_constraint,
4005 .put_constraint = snbep_pcu_put_constraint,
4008 static struct intel_uncore_type skx_uncore_pcu = {
4012 .perf_ctr_bits = 48,
4013 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
4014 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
4015 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4016 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
4017 .num_shared_regs = 1,
4018 .ops = &skx_uncore_pcu_ops,
4019 .format_group = &skx_uncore_pcu_format_group,
4022 static struct intel_uncore_type *skx_msr_uncores[] = {
4026 &skx_uncore_iio_free_running,
4033 * To determine the number of CHAs, read bits 27:0 of the CAPID6
4034 * register, which is located at Device 30, Function 3, Offset 0x9C (PCI device ID 0x2083).
4036 #define SKX_CAPID6 0x9c
4037 #define SKX_CHA_BIT_MASK GENMASK(27, 0)
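/* Each set bit in CAPID6[27:0] marks one enabled CHA, so a population count below yields the CHA count. */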
4039 static int skx_count_chabox(void)
4041 struct pci_dev *dev = NULL;
4044 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4048 pci_read_config_dword(dev, SKX_CAPID6, &val);
4049 val &= SKX_CHA_BIT_MASK;
4052 return hweight32(val);
4055 void skx_uncore_cpu_init(void)
4057 skx_uncore_chabox.num_boxes = skx_count_chabox();
4058 uncore_msr_uncores = skx_msr_uncores;
4061 static struct intel_uncore_type skx_uncore_imc = {
4065 .perf_ctr_bits = 48,
4066 .fixed_ctr_bits = 48,
4067 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4068 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4069 .event_descs = hswep_uncore_imc_events,
4070 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4071 .event_ctl = SNBEP_PCI_PMON_CTL0,
4072 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4073 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4074 .ops = &ivbep_uncore_pci_ops,
4075 .format_group = &skx_uncore_format_group,
4078 static struct attribute *skx_upi_uncore_formats_attr[] = {
4079 &format_attr_event.attr,
4080 &format_attr_umask_ext.attr,
4081 &format_attr_edge.attr,
4082 &format_attr_inv.attr,
4083 &format_attr_thresh8.attr,
4087 static const struct attribute_group skx_upi_uncore_format_group = {
4089 .attrs = skx_upi_uncore_formats_attr,
4092 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4094 struct pci_dev *pdev = box->pci_dev;
4096 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4097 pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4100 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4101 .init_box = skx_upi_uncore_pci_init_box,
4102 .disable_box = snbep_uncore_pci_disable_box,
4103 .enable_box = snbep_uncore_pci_enable_box,
4104 .disable_event = snbep_uncore_pci_disable_event,
4105 .enable_event = snbep_uncore_pci_enable_event,
4106 .read_counter = snbep_uncore_pci_read_counter,
4109 static struct intel_uncore_type skx_uncore_upi = {
4113 .perf_ctr_bits = 48,
4114 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
4115 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
4116 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4117 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4118 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
4119 .ops = &skx_upi_uncore_pci_ops,
4120 .format_group = &skx_upi_uncore_format_group,
4123 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4125 struct pci_dev *pdev = box->pci_dev;
4127 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4128 pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4131 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4132 .init_box = skx_m2m_uncore_pci_init_box,
4133 .disable_box = snbep_uncore_pci_disable_box,
4134 .enable_box = snbep_uncore_pci_enable_box,
4135 .disable_event = snbep_uncore_pci_disable_event,
4136 .enable_event = snbep_uncore_pci_enable_event,
4137 .read_counter = snbep_uncore_pci_read_counter,
4140 static struct intel_uncore_type skx_uncore_m2m = {
4144 .perf_ctr_bits = 48,
4145 .perf_ctr = SKX_M2M_PCI_PMON_CTR0,
4146 .event_ctl = SKX_M2M_PCI_PMON_CTL0,
4147 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4148 .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL,
4149 .ops = &skx_m2m_uncore_pci_ops,
4150 .format_group = &skx_uncore_format_group,
4153 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4154 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4155 EVENT_CONSTRAINT_END
4158 static struct intel_uncore_type skx_uncore_m2pcie = {
4162 .perf_ctr_bits = 48,
4163 .constraints = skx_uncore_m2pcie_constraints,
4164 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4165 .event_ctl = SNBEP_PCI_PMON_CTL0,
4166 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4167 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4168 .ops = &ivbep_uncore_pci_ops,
4169 .format_group = &skx_uncore_format_group,
4172 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4173 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4174 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4175 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4176 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4177 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4178 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4179 UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4180 UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4181 EVENT_CONSTRAINT_END
4184 static struct intel_uncore_type skx_uncore_m3upi = {
4188 .perf_ctr_bits = 48,
4189 .constraints = skx_uncore_m3upi_constraints,
4190 .perf_ctr = SNBEP_PCI_PMON_CTR0,
4191 .event_ctl = SNBEP_PCI_PMON_CTL0,
4192 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4193 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
4194 .ops = &ivbep_uncore_pci_ops,
4195 .format_group = &skx_uncore_format_group,
4202 SKX_PCI_UNCORE_M2PCIE,
4203 SKX_PCI_UNCORE_M3UPI,
4206 static struct intel_uncore_type *skx_pci_uncores[] = {
4207 [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc,
4208 [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m,
4209 [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi,
4210 [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
4211 [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi,
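/*
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) packs the expected PCI
 * device/function, the box type (an index into skx_pci_uncores) and the
 * box instance into driver_data for the ID table below.
 */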
4215 static const struct pci_device_id skx_uncore_pci_ids[] = {
4216 { /* MC0 Channel 0 */
4217 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4218 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4220 { /* MC0 Channel 1 */
4221 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4222 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4224 { /* MC0 Channel 2 */
4225 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4226 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4228 { /* MC1 Channel 0 */
4229 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4230 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4232 { /* MC1 Channel 1 */
4233 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4234 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4236 { /* MC1 Channel 2 */
4237 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4238 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4241 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4242 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4245 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4246 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4249 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4250 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4253 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4254 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4257 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4258 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4261 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4262 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4265 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4266 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4269 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4270 .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4273 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4274 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4276 { /* M3UPI0 Link 0 */
4277 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4278 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4280 { /* M3UPI0 Link 1 */
4281 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4282 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4284 { /* M3UPI1 Link 2 */
4285 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4286 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4288 { /* end: all zeroes */ }
4292 static struct pci_driver skx_uncore_pci_driver = {
4293 .name = "skx_uncore",
4294 .id_table = skx_uncore_pci_ids,
4297 int skx_uncore_pci_init(void)
4299 /* need to double check pci address */
4300 int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4305 uncore_pci_uncores = skx_pci_uncores;
4306 uncore_pci_driver = &skx_uncore_pci_driver;
4310 /* end of SKX uncore support */
4312 /* SNR uncore support */
4314 static struct intel_uncore_type snr_uncore_ubox = {
4318 .perf_ctr_bits = 48,
4319 .fixed_ctr_bits = 48,
4320 .perf_ctr = SNR_U_MSR_PMON_CTR0,
4321 .event_ctl = SNR_U_MSR_PMON_CTL0,
4322 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4323 .fixed_ctr = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4324 .fixed_ctl = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4325 .ops = &ivbep_uncore_msr_ops,
4326 .format_group = &ivbep_uncore_format_group,
4329 static struct attribute *snr_uncore_cha_formats_attr[] = {
4330 &format_attr_event.attr,
4331 &format_attr_umask_ext2.attr,
4332 &format_attr_edge.attr,
4333 &format_attr_tid_en.attr,
4334 &format_attr_inv.attr,
4335 &format_attr_thresh8.attr,
4336 &format_attr_filter_tid5.attr,
4339 static const struct attribute_group snr_uncore_chabox_format_group = {
4341 .attrs = snr_uncore_cha_formats_attr,
4344 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4346 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4348 reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4349 box->pmu->type->msr_offset * box->pmu->pmu_idx;
4350 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4356 static void snr_cha_enable_event(struct intel_uncore_box *box,
4357 struct perf_event *event)
4359 struct hw_perf_event *hwc = &event->hw;
4360 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4362 if (reg1->idx != EXTRA_REG_NONE)
4363 wrmsrl(reg1->reg, reg1->config);
4365 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4368 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4369 .init_box = ivbep_uncore_msr_init_box,
4370 .disable_box = snbep_uncore_msr_disable_box,
4371 .enable_box = snbep_uncore_msr_enable_box,
4372 .disable_event = snbep_uncore_msr_disable_event,
4373 .enable_event = snr_cha_enable_event,
4374 .read_counter = uncore_msr_read_counter,
4375 .hw_config = snr_cha_hw_config,
4378 static struct intel_uncore_type snr_uncore_chabox = {
4382 .perf_ctr_bits = 48,
4383 .event_ctl = SNR_CHA_MSR_PMON_CTL0,
4384 .perf_ctr = SNR_CHA_MSR_PMON_CTR0,
4385 .box_ctl = SNR_CHA_MSR_PMON_BOX_CTL,
4386 .msr_offset = HSWEP_CBO_MSR_OFFSET,
4387 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4388 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4389 .ops = &snr_uncore_chabox_ops,
4390 .format_group = &snr_uncore_chabox_format_group,
4393 static struct attribute *snr_uncore_iio_formats_attr[] = {
4394 &format_attr_event.attr,
4395 &format_attr_umask.attr,
4396 &format_attr_edge.attr,
4397 &format_attr_inv.attr,
4398 &format_attr_thresh9.attr,
4399 &format_attr_ch_mask2.attr,
4400 &format_attr_fc_mask2.attr,
4404 static const struct attribute_group snr_uncore_iio_format_group = {
4406 .attrs = snr_uncore_iio_formats_attr,
4409 static struct intel_uncore_type snr_uncore_iio = {
4413 .perf_ctr_bits = 48,
4414 .event_ctl = SNR_IIO_MSR_PMON_CTL0,
4415 .perf_ctr = SNR_IIO_MSR_PMON_CTR0,
4416 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4417 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4418 .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
4419 .msr_offset = SNR_IIO_MSR_OFFSET,
4420 .ops = &ivbep_uncore_msr_ops,
4421 .format_group = &snr_uncore_iio_format_group,
4424 static struct intel_uncore_type snr_uncore_irp = {
4428 .perf_ctr_bits = 48,
4429 .event_ctl = SNR_IRP0_MSR_PMON_CTL0,
4430 .perf_ctr = SNR_IRP0_MSR_PMON_CTR0,
4431 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4432 .box_ctl = SNR_IRP0_MSR_PMON_BOX_CTL,
4433 .msr_offset = SNR_IRP_MSR_OFFSET,
4434 .ops = &ivbep_uncore_msr_ops,
4435 .format_group = &ivbep_uncore_format_group,
4438 static struct intel_uncore_type snr_uncore_m2pcie = {
4442 .perf_ctr_bits = 48,
4443 .event_ctl = SNR_M2PCIE_MSR_PMON_CTL0,
4444 .perf_ctr = SNR_M2PCIE_MSR_PMON_CTR0,
4445 .box_ctl = SNR_M2PCIE_MSR_PMON_BOX_CTL,
4446 .msr_offset = SNR_M2PCIE_MSR_OFFSET,
4447 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4448 .ops = &ivbep_uncore_msr_ops,
4449 .format_group = &ivbep_uncore_format_group,
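/*
 * PCU events 0xb-0xe carry an occupancy-band filter in config1: the band
 * index follows from the event select (ev_sel - 0xb) and the selected
 * bits are programmed into SNR_PCU_MSR_PMON_BOX_FILTER.
 */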
4452 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4454 struct hw_perf_event *hwc = &event->hw;
4455 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4456 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4458 if (ev_sel >= 0xb && ev_sel <= 0xe) {
4459 reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4460 reg1->idx = ev_sel - 0xb;
4461 reg1->config = event->attr.config1 & (0xff << reg1->idx);
4466 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4467 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4468 .hw_config = snr_pcu_hw_config,
4469 .get_constraint = snbep_pcu_get_constraint,
4470 .put_constraint = snbep_pcu_put_constraint,
4473 static struct intel_uncore_type snr_uncore_pcu = {
4477 .perf_ctr_bits = 48,
4478 .perf_ctr = SNR_PCU_MSR_PMON_CTR0,
4479 .event_ctl = SNR_PCU_MSR_PMON_CTL0,
4480 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4481 .box_ctl = SNR_PCU_MSR_PMON_BOX_CTL,
4482 .num_shared_regs = 1,
4483 .ops = &snr_uncore_pcu_ops,
4484 .format_group = &skx_uncore_pcu_format_group,
4487 enum perf_uncore_snr_iio_freerunning_type_id {
4491 SNR_IIO_FREERUNNING_TYPE_MAX,
4494 static struct freerunning_counters snr_iio_freerunning[] = {
4495 [SNR_IIO_MSR_IOCLK] = { 0x1eac, 0x1, 0x10, 1, 48 },
4496 [SNR_IIO_MSR_BW_IN] = { 0x1f00, 0x1, 0x10, 8, 48 },
4499 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4500 /* Free-Running IIO CLOCKS Counter */
4501 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
4502 /* Free-Running IIO BANDWIDTH IN Counters */
4503 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
4504 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
4505 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
4506 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
4507 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
4508 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
4509 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
4510 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
4511 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
4512 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
4513 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
4514 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
4515 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
4516 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
4517 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
4518 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
4519 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
4520 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
4521 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
4522 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
4523 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
4524 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
4525 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
4526 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
4527 { /* end: all zeroes */ },
4530 static struct intel_uncore_type snr_uncore_iio_free_running = {
4531 .name = "iio_free_running",
4534 .num_freerunning_types = SNR_IIO_FREERUNNING_TYPE_MAX,
4535 .freerunning = snr_iio_freerunning,
4536 .ops = &skx_uncore_iio_freerunning_ops,
4537 .event_descs = snr_uncore_iio_freerunning_events,
4538 .format_group = &skx_uncore_iio_freerunning_format_group,
4541 static struct intel_uncore_type *snr_msr_uncores[] = {
4548 &snr_uncore_iio_free_running,
4552 void snr_uncore_cpu_init(void)
4554 uncore_msr_uncores = snr_msr_uncores;
4557 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4559 struct pci_dev *pdev = box->pci_dev;
4560 int box_ctl = uncore_pci_box_ctl(box);
4562 __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4563 pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4566 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4567 .init_box = snr_m2m_uncore_pci_init_box,
4568 .disable_box = snbep_uncore_pci_disable_box,
4569 .enable_box = snbep_uncore_pci_enable_box,
4570 .disable_event = snbep_uncore_pci_disable_event,
4571 .enable_event = snbep_uncore_pci_enable_event,
4572 .read_counter = snbep_uncore_pci_read_counter,
4575 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4576 &format_attr_event.attr,
4577 &format_attr_umask_ext3.attr,
4578 &format_attr_edge.attr,
4579 &format_attr_inv.attr,
4580 &format_attr_thresh8.attr,
4584 static const struct attribute_group snr_m2m_uncore_format_group = {
4586 .attrs = snr_m2m_uncore_formats_attr,
4589 static struct intel_uncore_type snr_uncore_m2m = {
4593 .perf_ctr_bits = 48,
4594 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
4595 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
4596 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4597 .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
4598 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
4599 .ops = &snr_m2m_uncore_pci_ops,
4600 .format_group = &snr_m2m_uncore_format_group,
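/*
 * These event-control registers are 64 bits wide, so the event config is
 * written as two 32-bit PCI config accesses: the low dword with the
 * enable bit set, then the high dword.
 */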
4603 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4605 struct pci_dev *pdev = box->pci_dev;
4606 struct hw_perf_event *hwc = &event->hw;
4608 pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4609 pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4612 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4613 .init_box = snr_m2m_uncore_pci_init_box,
4614 .disable_box = snbep_uncore_pci_disable_box,
4615 .enable_box = snbep_uncore_pci_enable_box,
4616 .disable_event = snbep_uncore_pci_disable_event,
4617 .enable_event = snr_uncore_pci_enable_event,
4618 .read_counter = snbep_uncore_pci_read_counter,
4621 static struct intel_uncore_type snr_uncore_pcie3 = {
4625 .perf_ctr_bits = 48,
4626 .perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
4627 .event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
4628 .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK,
4629 .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4630 .box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
4631 .ops = &snr_pcie3_uncore_pci_ops,
4632 .format_group = &skx_uncore_iio_format_group,
4637 SNR_PCI_UNCORE_PCIE3,
4640 static struct intel_uncore_type *snr_pci_uncores[] = {
4641 [SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
4642 [SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
4646 static const struct pci_device_id snr_uncore_pci_ids[] = {
4648 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4649 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4651 { /* end: all zeroes */ }
4654 static struct pci_driver snr_uncore_pci_driver = {
4655 .name = "snr_uncore",
4656 .id_table = snr_uncore_pci_ids,
4659 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4661 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4662 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4664 { /* end: all zeroes */ }
4667 static struct pci_driver snr_uncore_pci_sub_driver = {
4668 .name = "snr_uncore_sub",
4669 .id_table = snr_uncore_pci_sub_ids,
4672 int snr_uncore_pci_init(void)
4675 int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4676 SKX_GIDNIDMAP, true);
4681 uncore_pci_uncores = snr_pci_uncores;
4682 uncore_pci_driver = &snr_uncore_pci_driver;
4683 uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
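/*
 * SNR IMC PMON registers are MMIO-mapped rather than MSR- or PCI-based:
 * snr_uncore_get_mc_dev() locates the die's memory-controller PCI device,
 * and __snr_uncore_mmio_init_box() assembles the PMON base address from
 * its config space (base field shifted left by 23, per-memory mem_offset
 * field shifted left by 12) before ioremap()ing the region.
 */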
4687 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4689 struct pci_dev *mc_dev = NULL;
4693 mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4696 pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4703 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4704 unsigned int box_ctl, int mem_offset)
4706 struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4707 struct intel_uncore_type *type = box->pmu->type;
4708 resource_size_t addr;
4714 pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4715 addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4717 pci_read_config_dword(pdev, mem_offset, &pci_dword);
4718 addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4722 box->io_addr = ioremap(addr, type->mmio_map_size);
4723 if (!box->io_addr) {
4724 pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4728 writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4731 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4733 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4734 SNR_IMC_MMIO_MEM0_OFFSET);
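/*
 * Box-level freeze: setting SNBEP_PMON_BOX_CTL_FRZ in the box control
 * register stops every counter in the box; clearing it resumes counting.
 */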
4737 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4744 config = readl(box->io_addr);
4745 config |= SNBEP_PMON_BOX_CTL_FRZ;
4746 writel(config, box->io_addr);
4749 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4756 config = readl(box->io_addr);
4757 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4758 writel(config, box->io_addr);
4761 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4762 struct perf_event *event)
4764 struct hw_perf_event *hwc = &event->hw;
4769 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4772 writel(hwc->config | SNBEP_PMON_CTL_EN,
4773 box->io_addr + hwc->config_base);
4776 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4777 struct perf_event *event)
4779 struct hw_perf_event *hwc = &event->hw;
4784 if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4787 writel(hwc->config, box->io_addr + hwc->config_base);
4790 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4791 .init_box = snr_uncore_mmio_init_box,
4792 .exit_box = uncore_mmio_exit_box,
4793 .disable_box = snr_uncore_mmio_disable_box,
4794 .enable_box = snr_uncore_mmio_enable_box,
4795 .disable_event = snr_uncore_mmio_disable_event,
4796 .enable_event = snr_uncore_mmio_enable_event,
4797 .read_counter = uncore_mmio_read_counter,
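/*
 * The cas_count_* scale 6.103515625e-5 below is 64B / 2^20: each CAS
 * event evidently moves one 64-byte cache line, so scaled counts read
 * out directly in MiB.
 */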
4800 static struct uncore_event_desc snr_uncore_imc_events[] = {
4801 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
4802 INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x0f"),
4803 INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4804 INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4805 INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4806 INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4807 INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4808 { /* end: all zeroes */ },
4811 static struct intel_uncore_type snr_uncore_imc = {
4815 .perf_ctr_bits = 48,
4816 .fixed_ctr_bits = 48,
4817 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
4818 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
4819 .event_descs = snr_uncore_imc_events,
4820 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
4821 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
4822 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4823 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
4824 .mmio_offset = SNR_IMC_MMIO_OFFSET,
4825 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4826 .ops = &snr_uncore_mmio_ops,
4827 .format_group = &skx_uncore_format_group,
4830 enum perf_uncore_snr_imc_freerunning_type_id {
4834 SNR_IMC_FREERUNNING_TYPE_MAX,
4837 static struct freerunning_counters snr_imc_freerunning[] = {
4838 [SNR_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
4839 [SNR_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
4842 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4843 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
4845 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
4846 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
4847 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
4848 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
4849 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
4850 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
4851 { /* end: all zeroes */ },
4854 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4855 .init_box = snr_uncore_mmio_init_box,
4856 .exit_box = uncore_mmio_exit_box,
4857 .read_counter = uncore_mmio_read_counter,
4858 .hw_config = uncore_freerunning_hw_config,
4861 static struct intel_uncore_type snr_uncore_imc_free_running = {
4862 .name = "imc_free_running",
4865 .num_freerunning_types = SNR_IMC_FREERUNNING_TYPE_MAX,
4866 .mmio_map_size = SNR_IMC_MMIO_SIZE,
4867 .freerunning = snr_imc_freerunning,
4868 .ops = &snr_uncore_imc_freerunning_ops,
4869 .event_descs = snr_uncore_imc_freerunning_events,
4870 .format_group = &skx_uncore_iio_freerunning_format_group,
4873 static struct intel_uncore_type *snr_mmio_uncores[] = {
4875 &snr_uncore_imc_free_running,
4879 void snr_uncore_mmio_init(void)
4881 uncore_mmio_uncores = snr_mmio_uncores;
4884 /* end of SNR uncore support */
4886 /* ICX uncore support */
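/*
 * Per-CHA MSR offsets are irregular on ICX (note the wrap back to 0x0 at
 * index 34), hence a lookup table instead of a fixed per-box stride.
 */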
4888 static unsigned icx_cha_msr_offsets[] = {
4889 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
4890 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
4891 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
4892 0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0, 0xe,
4893 0x1c, 0x2a, 0x38, 0x46,
4896 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4898 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4899 bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4902 reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4903 icx_cha_msr_offsets[box->pmu->pmu_idx];
4904 reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4911 static struct intel_uncore_ops icx_uncore_chabox_ops = {
4912 .init_box = ivbep_uncore_msr_init_box,
4913 .disable_box = snbep_uncore_msr_disable_box,
4914 .enable_box = snbep_uncore_msr_enable_box,
4915 .disable_event = snbep_uncore_msr_disable_event,
4916 .enable_event = snr_cha_enable_event,
4917 .read_counter = uncore_msr_read_counter,
4918 .hw_config = icx_cha_hw_config,
4921 static struct intel_uncore_type icx_uncore_chabox = {
4924 .perf_ctr_bits = 48,
4925 .event_ctl = ICX_C34_MSR_PMON_CTL0,
4926 .perf_ctr = ICX_C34_MSR_PMON_CTR0,
4927 .box_ctl = ICX_C34_MSR_PMON_BOX_CTL,
4928 .msr_offsets = icx_cha_msr_offsets,
4929 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4930 .event_mask_ext = SNR_CHA_RAW_EVENT_MASK_EXT,
4931 .constraints = skx_uncore_chabox_constraints,
4932 .ops = &icx_uncore_chabox_ops,
4933 .format_group = &snr_uncore_chabox_format_group,
4936 static unsigned icx_msr_offsets[] = {
4937 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
4940 static struct event_constraint icx_uncore_iio_constraints[] = {
4941 UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
4942 UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
4943 UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4944 UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4945 UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
4946 EVENT_CONSTRAINT_END
4949 static struct intel_uncore_type icx_uncore_iio = {
4953 .perf_ctr_bits = 48,
4954 .event_ctl = ICX_IIO_MSR_PMON_CTL0,
4955 .perf_ctr = ICX_IIO_MSR_PMON_CTR0,
4956 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4957 .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4958 .box_ctl = ICX_IIO_MSR_PMON_BOX_CTL,
4959 .msr_offsets = icx_msr_offsets,
4960 .constraints = icx_uncore_iio_constraints,
4961 .ops = &skx_uncore_iio_ops,
4962 .format_group = &snr_uncore_iio_format_group,
4965 static struct intel_uncore_type icx_uncore_irp = {
4969 .perf_ctr_bits = 48,
4970 .event_ctl = ICX_IRP0_MSR_PMON_CTL0,
4971 .perf_ctr = ICX_IRP0_MSR_PMON_CTR0,
4972 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4973 .box_ctl = ICX_IRP0_MSR_PMON_BOX_CTL,
4974 .msr_offsets = icx_msr_offsets,
4975 .ops = &ivbep_uncore_msr_ops,
4976 .format_group = &ivbep_uncore_format_group,
4979 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
4980 UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
4981 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4982 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
4983 EVENT_CONSTRAINT_END
4986 static struct intel_uncore_type icx_uncore_m2pcie = {
4990 .perf_ctr_bits = 48,
4991 .event_ctl = ICX_M2PCIE_MSR_PMON_CTL0,
4992 .perf_ctr = ICX_M2PCIE_MSR_PMON_CTR0,
4993 .box_ctl = ICX_M2PCIE_MSR_PMON_BOX_CTL,
4994 .msr_offsets = icx_msr_offsets,
4995 .constraints = icx_uncore_m2pcie_constraints,
4996 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
4997 .ops = &ivbep_uncore_msr_ops,
4998 .format_group = &ivbep_uncore_format_group,
5001 enum perf_uncore_icx_iio_freerunning_type_id {
5005 ICX_IIO_FREERUNNING_TYPE_MAX,
5008 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
5009 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
5012 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
5013 0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
5016 static struct freerunning_counters icx_iio_freerunning[] = {
5017 [ICX_IIO_MSR_IOCLK] = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5018 [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5021 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5022 /* Free-Running IIO CLOCKS Counter */
5023 INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
5024 /* Free-Running IIO BANDWIDTH IN Counters */
5025 INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
5026 INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
5027 INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
5028 INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
5029 INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
5030 INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
5031 INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
5032 INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
5033 INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
5034 INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
5035 INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
5036 INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
5037 INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
5038 INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
5039 INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
5040 INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
5041 INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
5042 INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
5043 INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
5044 INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
5045 INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
5046 INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
5047 INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
5048 INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
5049 { /* end: all zeroes */ },
5052 static struct intel_uncore_type icx_uncore_iio_free_running = {
5053 .name = "iio_free_running",
5056 .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
5057 .freerunning = icx_iio_freerunning,
5058 .ops = &skx_uncore_iio_freerunning_ops,
5059 .event_descs = icx_uncore_iio_freerunning_events,
5060 .format_group = &skx_uncore_iio_freerunning_format_group,
5063 static struct intel_uncore_type *icx_msr_uncores[] = {
5070 &icx_uncore_iio_free_running,
5075 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7 (high)
5076 * registers, which are located at Device 30, Function 3.
5078 #define ICX_CAPID6 0x9c
5079 #define ICX_CAPID7 0xa0
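/*
 * CAPID6 supplies the low 32 bits and CAPID7 the high 32 bits of one
 * 64-bit mask; as on SKX, each set bit marks one enabled CHA.
 */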
5081 static u64 icx_count_chabox(void)
5083 struct pci_dev *dev = NULL;
5086 dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5090 pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5091 pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5094 return hweight64(caps);
5097 void icx_uncore_cpu_init(void)
5099 u64 num_boxes = icx_count_chabox();
5101 if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5103 icx_uncore_chabox.num_boxes = num_boxes;
5104 uncore_msr_uncores = icx_msr_uncores;
5107 static struct intel_uncore_type icx_uncore_m2m = {
5111 .perf_ctr_bits = 48,
5112 .perf_ctr = SNR_M2M_PCI_PMON_CTR0,
5113 .event_ctl = SNR_M2M_PCI_PMON_CTL0,
5114 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5115 .box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
5116 .ops = &snr_m2m_uncore_pci_ops,
5117 .format_group = &skx_uncore_format_group,
5120 static struct attribute *icx_upi_uncore_formats_attr[] = {
5121 &format_attr_event.attr,
5122 &format_attr_umask_ext4.attr,
5123 &format_attr_edge.attr,
5124 &format_attr_inv.attr,
5125 &format_attr_thresh8.attr,
5129 static const struct attribute_group icx_upi_uncore_format_group = {
5131 .attrs = icx_upi_uncore_formats_attr,
5134 static struct intel_uncore_type icx_uncore_upi = {
5138 .perf_ctr_bits = 48,
5139 .perf_ctr = ICX_UPI_PCI_PMON_CTR0,
5140 .event_ctl = ICX_UPI_PCI_PMON_CTL0,
5141 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5142 .event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5143 .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL,
5144 .ops = &skx_upi_uncore_pci_ops,
5145 .format_group = &icx_upi_uncore_format_group,
5148 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5149 UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5150 UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5151 UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5152 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5153 UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5154 UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5155 UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5156 UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5157 EVENT_CONSTRAINT_END
5160 static struct intel_uncore_type icx_uncore_m3upi = {
5164 .perf_ctr_bits = 48,
5165 .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0,
5166 .event_ctl = ICX_M3UPI_PCI_PMON_CTL0,
5167 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5168 .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL,
5169 .constraints = icx_uncore_m3upi_constraints,
5170 .ops = &ivbep_uncore_pci_ops,
5171 .format_group = &skx_uncore_format_group,
5177 ICX_PCI_UNCORE_M3UPI,
5180 static struct intel_uncore_type *icx_pci_uncores[] = {
5181 [ICX_PCI_UNCORE_M2M] = &icx_uncore_m2m,
5182 [ICX_PCI_UNCORE_UPI] = &icx_uncore_upi,
5183 [ICX_PCI_UNCORE_M3UPI] = &icx_uncore_m3upi,
5187 static const struct pci_device_id icx_uncore_pci_ids[] = {
5189 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5190 .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5193 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5194 .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5197 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5198 .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5201 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5202 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5205 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5206 .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5209 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5210 .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5213 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5214 .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5216 { /* M3UPI Link 0 */
5217 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5218 .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5220 { /* M3UPI Link 1 */
5221 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5222 .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5224 { /* M3UPI Link 2 */
5225 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5226 .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5228 { /* end: all zeroes */ }
5231 static struct pci_driver icx_uncore_pci_driver = {
5232 .name = "icx_uncore",
5233 .id_table = icx_uncore_pci_ids,
5236 int icx_uncore_pci_init(void)
5239 int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5240 SKX_GIDNIDMAP, true);
5245 uncore_pci_uncores = icx_pci_uncores;
5246 uncore_pci_driver = &icx_uncore_pci_driver;
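/*
 * ICX spreads IMC channels across several memory controllers:
 * pmu_idx % ICX_NUMBER_IMC_CHN selects the channel within a controller
 * (register blocks mmio_offset apart), while pmu_idx / ICX_NUMBER_IMC_CHN
 * selects the controller, whose config registers sit ICX_IMC_MEM_STRIDE
 * apart starting at SNR_IMC_MMIO_MEM0_OFFSET.
 */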
5250 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5252 unsigned int box_ctl = box->pmu->type->box_ctl +
5253 box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5254 int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5255 SNR_IMC_MMIO_MEM0_OFFSET;
5257 __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5260 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5261 .init_box = icx_uncore_imc_init_box,
5262 .exit_box = uncore_mmio_exit_box,
5263 .disable_box = snr_uncore_mmio_disable_box,
5264 .enable_box = snr_uncore_mmio_enable_box,
5265 .disable_event = snr_uncore_mmio_disable_event,
5266 .enable_event = snr_uncore_mmio_enable_event,
5267 .read_counter = uncore_mmio_read_counter,
5270 static struct intel_uncore_type icx_uncore_imc = {
5274 .perf_ctr_bits = 48,
5275 .fixed_ctr_bits = 48,
5276 .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
5277 .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
5278 .event_descs = hswep_uncore_imc_events,
5279 .perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
5280 .event_ctl = SNR_IMC_MMIO_PMON_CTL0,
5281 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
5282 .box_ctl = SNR_IMC_MMIO_PMON_BOX_CTL,
5283 .mmio_offset = SNR_IMC_MMIO_OFFSET,
5284 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5285 .ops = &icx_uncore_mmio_ops,
5286 .format_group = &skx_uncore_format_group,
5289 enum perf_uncore_icx_imc_freerunning_type_id {
5294 ICX_IMC_FREERUNNING_TYPE_MAX,
5297 static struct freerunning_counters icx_imc_freerunning[] = {
5298 [ICX_IMC_DCLK] = { 0x22b0, 0x0, 0, 1, 48 },
5299 [ICX_IMC_DDR] = { 0x2290, 0x8, 0, 2, 48 },
5300 [ICX_IMC_DDRT] = { 0x22a0, 0x8, 0, 2, 48 },
5303 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5304 INTEL_UNCORE_EVENT_DESC(dclk, "event=0xff,umask=0x10"),
5306 INTEL_UNCORE_EVENT_DESC(read, "event=0xff,umask=0x20"),
5307 INTEL_UNCORE_EVENT_DESC(read.scale, "6.103515625e-5"),
5308 INTEL_UNCORE_EVENT_DESC(read.unit, "MiB"),
5309 INTEL_UNCORE_EVENT_DESC(write, "event=0xff,umask=0x21"),
5310 INTEL_UNCORE_EVENT_DESC(write.scale, "6.103515625e-5"),
5311 INTEL_UNCORE_EVENT_DESC(write.unit, "MiB"),
5313 INTEL_UNCORE_EVENT_DESC(ddrt_read, "event=0xff,umask=0x30"),
5314 INTEL_UNCORE_EVENT_DESC(ddrt_read.scale, "6.103515625e-5"),
5315 INTEL_UNCORE_EVENT_DESC(ddrt_read.unit, "MiB"),
5316 INTEL_UNCORE_EVENT_DESC(ddrt_write, "event=0xff,umask=0x31"),
5317 INTEL_UNCORE_EVENT_DESC(ddrt_write.scale, "6.103515625e-5"),
5318 INTEL_UNCORE_EVENT_DESC(ddrt_write.unit, "MiB"),
5319 { /* end: all zeroes */ },
5322 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5324 int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5325 SNR_IMC_MMIO_MEM0_OFFSET;
5327 __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5330 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5331 .init_box = icx_uncore_imc_freerunning_init_box,
5332 .exit_box = uncore_mmio_exit_box,
5333 .read_counter = uncore_mmio_read_counter,
5334 .hw_config = uncore_freerunning_hw_config,
5337 static struct intel_uncore_type icx_uncore_imc_free_running = {
5338 .name = "imc_free_running",
5341 .num_freerunning_types = ICX_IMC_FREERUNNING_TYPE_MAX,
5342 .mmio_map_size = SNR_IMC_MMIO_SIZE,
5343 .freerunning = icx_imc_freerunning,
5344 .ops = &icx_uncore_imc_freerunning_ops,
5345 .event_descs = icx_uncore_imc_freerunning_events,
5346 .format_group = &skx_uncore_iio_freerunning_format_group,
5349 static struct intel_uncore_type *icx_mmio_uncores[] = {
5351 &icx_uncore_imc_free_running,
5355 void icx_uncore_mmio_init(void)
5357 uncore_mmio_uncores = icx_mmio_uncores;
5360 /* end of ICX uncore support */