// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID                 0x40
#define SNBEP_GIDNIDMAP                 0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
#define SNBEP_PMON_CTL_EN               (1 << 22)
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
#define SNBEP_CBO_MSR_OFFSET                    0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
        .event = (e),                           \
        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
        .config_mask = (m),                     \
        .idx = (i)                              \
}
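
/*
 * SNBEP_CBO_EVENT_EXTRA_REG() builds one struct extra_reg entry: events
 * whose control value matches @e under mask @m need the Cbox box filter
 * MSR, and @i marks which filter fields the event claims (consumed by
 * snbep_cbox_hw_config() below).
 */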

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (IVBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID       (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID       (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC       (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6        (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC        (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK        \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))
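
/*
 * __BITS_VALUE(x, i, n) extracts the i'th n-bit wide field of x, e.g.
 * __BITS_VALUE(0xabcd, 1, 8) == 0xab. It is used below to pick one
 * per-field reference count out of a packed shared-register refcount.
 */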

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0                   0x709
#define HSWEP_U_MSR_PMON_CTL0                   0x705
#define HSWEP_U_MSR_PMON_FILTER                 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL               0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
#define HSWEP_CBO_MSR_OFFSET                    0x10


#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID       (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID       (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC       (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6        (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC        (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0                  0x726
#define HSWEP_S0_MSR_PMON_CTL0                  0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL               0x720
#define HSWEP_SBOX_MSR_OFFSET                   0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
                                                SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET                      0xc
#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
                                         KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
#define KNL_UCLK_MSR_PMON_CTL0                  0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
#define KNL_PMON_FIXED_CTL_EN                   0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_CBO_PMON_CTL_TID_EN | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID                   0xc0
#define SKX_GIDNIDMAP                   0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER          0x300
#define SKX_MSR_CPU_BUS_VALID_BIT       (1ULL << 63)
#define BUS_NUM_STRIDE                  8
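
/*
 * Illustrative sketch only (not part of the driver): per the layout
 * documented above, bus number CPUBUSNO(idx) occupies the idx'th 8-bit
 * field of the MSR value, gated by the VALID bit. The helper name and
 * the -1 error convention are assumptions for illustration.
 */
static inline int skx_cpu_bus_number(u64 msr, int idx)
{
        if (!(msr & SKX_MSR_CPU_BUS_VALID_BIT))
                return -1;      /* BIOS has not programmed the CPUBUSNO CSRs */
        return (msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
}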

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0          0xa48
#define SKX_IIO0_MSR_PMON_CTR0          0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
#define SKX_IIO_MSR_OFFSET              0x20

#define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
#define SKX_PMON_CTL_CH_MASK            (0xff << 4)
#define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
                                         SKX_PMON_CTL_CH_MASK | \
                                         SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0          0xa5b
#define SKX_IRP0_MSR_PMON_CTR0          0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
#define SKX_IRP_MSR_OFFSET              0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0           0x350
#define SKX_UPI_PCI_PMON_CTR0           0x318
#define SKX_UPI_PCI_PMON_BOX_CTL        0x378
#define SKX_UPI_CTL_UMASK_EXT           0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0           0x228
#define SKX_M2M_PCI_PMON_CTR0           0x200
#define SKX_M2M_PCI_PMON_BOX_CTL        0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID       0x9a2
#define SNR_ICX_SAD_CONTROL_CFG         0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)              (((data) >> 4) & 0x7)

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0                     0x1f98
#define SNR_U_MSR_PMON_CTL0                     0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL           0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR           0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT              0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0                   0x1c01
#define SNR_CHA_MSR_PMON_CTR0                   0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL                0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0             0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0                   0x1e08
#define SNR_IIO_MSR_PMON_CTR0                   0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL                0x1e00
#define SNR_IIO_MSR_OFFSET                      0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT         0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0                  0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0                  0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL               0x1ea0
#define SNR_IRP_MSR_OFFSET                      0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0                0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0                0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL             0x1e50
#define SNR_M2PCIE_MSR_OFFSET                   0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0                   0x1ef1
#define SNR_PCU_MSR_PMON_CTR0                   0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL                0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER             0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0                   0x468
#define SNR_M2M_PCI_PMON_CTR0                   0x440
#define SNR_M2M_PCI_PMON_BOX_CTL                0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT              0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0                 0x508
#define SNR_PCIE3_PCI_PMON_CTR0                 0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL              0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL             0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR             0x38
#define SNR_IMC_MMIO_PMON_CTL0                  0x40
#define SNR_IMC_MMIO_PMON_CTR0                  0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL               0x22800
#define SNR_IMC_MMIO_OFFSET                     0x4000
#define SNR_IMC_MMIO_SIZE                       0x4000
#define SNR_IMC_MMIO_BASE_OFFSET                0xd0
#define SNR_IMC_MMIO_BASE_MASK                  0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET                0xd8
#define SNR_IMC_MMIO_MEM0_MASK                  0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0                   0xb68
#define ICX_C34_MSR_PMON_CTL0                   0xb61
#define ICX_C34_MSR_PMON_BOX_CTL                0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0            0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0                   0xa58
#define ICX_IIO_MSR_PMON_CTR0                   0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL                0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0                  0xa4d
#define ICX_IRP0_MSR_PMON_CTR0                  0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL               0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0                0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0                0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL             0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0                   0x350
#define ICX_UPI_PCI_PMON_CTR0                   0x320
#define ICX_UPI_PCI_PMON_BOX_CTL                0x318
#define ICX_UPI_CTL_UMASK_EXT                   0xffffff

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0                 0xd8
#define ICX_M3UPI_PCI_PMON_CTR0                 0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL              0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN                      2
#define ICX_IMC_MEM_STRIDE                      0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT                  0xffffff

/* SPR CHA */
#define SPR_CHA_PMON_CTL_TID_EN                 (1 << 16)
#define SPR_CHA_PMON_EVENT_MASK                 (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID             0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0             0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

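/*
 * PCI PMON boxes are frozen/unfrozen by toggling SNBEP_PMON_BOX_CTL_FRZ
 * in the per-box control register; the read-modify-write is skipped if
 * the config space read fails.
 */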
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
        u32 config = 0;

        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                pci_write_config_dword(pdev, box_ctl, config);
        }
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

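/*
 * PMON counters are wider than 32 bits, so the 64-bit value is
 * assembled from two consecutive 32-bit config space reads.
 */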
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
        u64 count = 0;

        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

        return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);

        pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config |= SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        u64 config;
        unsigned msr;

        msr = uncore_msr_box_ctl(box);
        if (msr) {
                rdmsrl(msr, config);
                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
                wrmsrl(msr, config);
        }
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr)
                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
        { /* end: all zeroes */ },
};
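
/*
 * The cas_count scale above converts CAS transactions to MiB: each CAS
 * transfers one 64-byte cache line, and 64 / 2^20 = 6.103515625e-5.
 */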

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
        .disable_box    = snbep_uncore_msr_disable_box,         \
        .enable_box     = snbep_uncore_msr_enable_box,          \
        .disable_event  = snbep_uncore_msr_disable_event,       \
        .enable_event   = snbep_uncore_msr_enable_event,        \
        .read_counter   = uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
        .init_box       = snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
        .init_box       = snbep_uncore_pci_init_box,            \
        .disable_box    = snbep_uncore_pci_disable_box,         \
        .enable_box     = snbep_uncore_pci_enable_box,          \
        .disable_event  = snbep_uncore_pci_disable_event,       \
        .read_counter   = snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
        .enable_event   = snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &snbep_uncore_msr_ops,
        .format_group   = &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
        EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i;

        if (uncore_box_is_fake(box))
                return;

        for (i = 0; i < 5; i++) {
                if (reg1->alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        reg1->alloc = 0;
}

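/*
 * The box filter MSR is shared by all counters in a box. Each filter
 * field carries a 6-bit reference count packed into er->ref (read back
 * via __BITS_VALUE(..., i, 6)); a field may be claimed either when it
 * is currently unused or when the new event programs the identical
 * filter value.
 */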
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
                            u64 (*cbox_filter_mask)(int fields))
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        int i, alloc = 0;
        unsigned long flags;
        u64 mask;

        if (reg1->idx == EXTRA_REG_NONE)
                return NULL;

        raw_spin_lock_irqsave(&er->lock, flags);
        for (i = 0; i < 5; i++) {
                if (!(reg1->idx & (0x1 << i)))
                        continue;
                if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
                        continue;

                mask = cbox_filter_mask(0x1 << i);
                if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
                    !((reg1->config ^ er->config) & mask)) {
                        atomic_add(1 << (i * 6), &er->ref);
                        er->config &= ~mask;
                        er->config |= reg1->config & mask;
                        alloc |= (0x1 << i);
                } else {
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);
        if (i < 5)
                goto fail;

        if (!uncore_box_is_fake(box))
                reg1->alloc |= alloc;

        return NULL;
fail:
        for (; i >= 0; i--) {
                if (alloc & (0x1 << i))
                        atomic_sub(1 << (i * 6), &er->ref);
        }
        return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
        u64 mask = 0;

        if (fields & 0x1)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
        if (fields & 0x2)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
        if (fields & 0x4)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
        if (fields & 0x8)
                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

        return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct extra_reg *er;
        int idx = 0;

        for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
                if (er->event != (event->hw.config & er->config_mask))
                        continue;
                idx |= er->idx;
        }

        if (idx) {
                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
                reg1->idx = idx;
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_cbox_hw_config,
        .get_constraint         = snbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &snbep_uncore_cbox_ops,
        .format_group           = &snbep_uncore_cbox_format_group,
};

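/*
 * The PCU filter packs four 8-bit occupancy band thresholds into
 * config1 (filter_band0..3). snbep_pcu_alter_er() moves an event's
 * band value to another byte lane when its preferred lane is already
 * claimed with a different value.
 */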
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        u64 config = reg1->config;

        if (new_idx > reg1->idx)
                config <<= 8 * (new_idx - reg1->idx);
        else
                config >>= 8 * (reg1->idx - new_idx);

        if (modify) {
                hwc->config += new_idx - reg1->idx;
                reg1->config = config;
                reg1->idx = new_idx;
        }
        return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
        unsigned long flags;
        int idx = reg1->idx;
        u64 mask, config1 = reg1->config;
        bool ok = false;

        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;
again:
        mask = 0xffULL << (idx * 8);
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
            !((config1 ^ er->config) & mask)) {
                atomic_add(1 << (idx * 8), &er->ref);
                er->config &= ~mask;
                er->config |= config1 & mask;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (!ok) {
                idx = (idx + 1) % 4;
                if (idx != reg1->idx) {
                        config1 = snbep_pcu_alter_er(event, idx, false);
                        goto again;
                }
                return &uncore_constraint_empty;
        }

        if (!uncore_box_is_fake(box)) {
                if (idx != reg1->idx)
                        snbep_pcu_alter_er(event, idx, true);
                reg1->alloc = 1;
        }
        return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct intel_uncore_extra_reg *er = &box->shared_regs[0];

        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        atomic_sub(1 << (reg1->idx * 8), &er->ref);
        reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
        }
        return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
        &snbep_uncore_ubox,
        &snbep_uncore_cbox,
        &snbep_uncore_pcu,
        NULL,
};

void snbep_uncore_cpu_init(void)
{
        if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = snbep_msr_uncores;
}

enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
        BDX_PCI_QPI_PORT2_FILTER,
};

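/*
 * QPI match/mask events (event select 0x38) take 64-bit packet match
 * and mask values, which live in a separate per-port "filter" PCI
 * device looked up via the *_FILTER indices above.
 */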
1184 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1185 {
1186         struct hw_perf_event *hwc = &event->hw;
1187         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1188         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1189
1190         if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1191                 reg1->idx = 0;
1192                 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1193                 reg1->config = event->attr.config1;
1194                 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1195                 reg2->config = event->attr.config2;
1196         }
1197         return 0;
1198 }
1199
1200 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1201 {
1202         struct pci_dev *pdev = box->pci_dev;
1203         struct hw_perf_event *hwc = &event->hw;
1204         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1205         struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1206
1207         if (reg1->idx != EXTRA_REG_NONE) {
1208                 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1209                 int die = box->dieid;
1210                 struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1211
1212                 if (filter_pdev) {
1213                         pci_write_config_dword(filter_pdev, reg1->reg,
1214                                                 (u32)reg1->config);
1215                         pci_write_config_dword(filter_pdev, reg1->reg + 4,
1216                                                 (u32)(reg1->config >> 32));
1217                         pci_write_config_dword(filter_pdev, reg2->reg,
1218                                                 (u32)reg2->config);
1219                         pci_write_config_dword(filter_pdev, reg2->reg + 4,
1220                                                 (u32)(reg2->config >> 32));
1221                 }
1222         }
1223
1224         pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1225 }
1226
1227 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1228         SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1229         .enable_event           = snbep_qpi_enable_event,
1230         .hw_config              = snbep_qpi_hw_config,
1231         .get_constraint         = uncore_get_constraint,
1232         .put_constraint         = uncore_put_constraint,
1233 };
1234
1235 #define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
1236         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1237         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1238         .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
1239         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1240         .ops            = &snbep_uncore_pci_ops,                \
1241         .format_group   = &snbep_uncore_format_group
1242
1243 static struct intel_uncore_type snbep_uncore_ha = {
1244         .name           = "ha",
1245         .num_counters   = 4,
1246         .num_boxes      = 1,
1247         .perf_ctr_bits  = 48,
1248         SNBEP_UNCORE_PCI_COMMON_INIT(),
1249 };
1250
1251 static struct intel_uncore_type snbep_uncore_imc = {
1252         .name           = "imc",
1253         .num_counters   = 4,
1254         .num_boxes      = 4,
1255         .perf_ctr_bits  = 48,
1256         .fixed_ctr_bits = 48,
1257         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1258         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1259         .event_descs    = snbep_uncore_imc_events,
1260         SNBEP_UNCORE_PCI_COMMON_INIT(),
1261 };
1262
1263 static struct intel_uncore_type snbep_uncore_qpi = {
1264         .name                   = "qpi",
1265         .num_counters           = 4,
1266         .num_boxes              = 2,
1267         .perf_ctr_bits          = 48,
1268         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1269         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1270         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1271         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1272         .num_shared_regs        = 1,
1273         .ops                    = &snbep_uncore_qpi_ops,
1274         .event_descs            = snbep_uncore_qpi_events,
1275         .format_group           = &snbep_uncore_qpi_format_group,
1276 };
1277
1279 static struct intel_uncore_type snbep_uncore_r2pcie = {
1280         .name           = "r2pcie",
1281         .num_counters   = 4,
1282         .num_boxes      = 1,
1283         .perf_ctr_bits  = 44,
1284         .constraints    = snbep_uncore_r2pcie_constraints,
1285         SNBEP_UNCORE_PCI_COMMON_INIT(),
1286 };
1287
1288 static struct intel_uncore_type snbep_uncore_r3qpi = {
1289         .name           = "r3qpi",
1290         .num_counters   = 3,
1291         .num_boxes      = 2,
1292         .perf_ctr_bits  = 44,
1293         .constraints    = snbep_uncore_r3qpi_constraints,
1294         SNBEP_UNCORE_PCI_COMMON_INIT(),
1295 };
1296
1297 enum {
1298         SNBEP_PCI_UNCORE_HA,
1299         SNBEP_PCI_UNCORE_IMC,
1300         SNBEP_PCI_UNCORE_QPI,
1301         SNBEP_PCI_UNCORE_R2PCIE,
1302         SNBEP_PCI_UNCORE_R3QPI,
1303 };
1304
1305 static struct intel_uncore_type *snbep_pci_uncores[] = {
1306         [SNBEP_PCI_UNCORE_HA]           = &snbep_uncore_ha,
1307         [SNBEP_PCI_UNCORE_IMC]          = &snbep_uncore_imc,
1308         [SNBEP_PCI_UNCORE_QPI]          = &snbep_uncore_qpi,
1309         [SNBEP_PCI_UNCORE_R2PCIE]       = &snbep_uncore_r2pcie,
1310         [SNBEP_PCI_UNCORE_R3QPI]        = &snbep_uncore_r3qpi,
1311         NULL,
1312 };
1313
1314 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1315         { /* Home Agent */
1316                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1317                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1318         },
1319         { /* MC Channel 0 */
1320                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1321                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1322         },
1323         { /* MC Channel 1 */
1324                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1325                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1326         },
1327         { /* MC Channel 2 */
1328                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1329                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1330         },
1331         { /* MC Channel 3 */
1332                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1333                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1334         },
1335         { /* QPI Port 0 */
1336                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1337                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1338         },
1339         { /* QPI Port 1 */
1340                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1341                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1342         },
1343         { /* R2PCIe */
1344                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1345                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1346         },
1347         { /* R3QPI Link 0 */
1348                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1349                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1350         },
1351         { /* R3QPI Link 1 */
1352                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1353                 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1354         },
1355         { /* QPI Port 0 filter */
1356                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1357                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1358                                                    SNBEP_PCI_QPI_PORT0_FILTER),
1359         },
1360         { /* QPI Port 1 filter */
1361                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1362                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1363                                                    SNBEP_PCI_QPI_PORT1_FILTER),
1364         },
1365         { /* end: all zeroes */ }
1366 };
1367
1368 static struct pci_driver snbep_uncore_pci_driver = {
1369         .name           = "snbep_uncore",
1370         .id_table       = snbep_uncore_pci_ids,
1371 };
1372
1373 #define NODE_ID_MASK    0x7
1374
1375 /*
1376  * Build the PCI bus number to die (socket) ID mapping.
1377  */
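/*
 * devid is the PCI device ID of the UBOX device to probe; nodeid_loc and
 * idmap_loc are the config-space offsets of the node ID and node ID
 * mapping registers; reverse selects the direction in which buses
 * without a UBOX inherit a neighbouring bus's die ID.
 */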
1378 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1379 {
1380         struct pci_dev *ubox_dev = NULL;
1381         int i, bus, nodeid, segment, die_id;
1382         struct pci2phy_map *map;
1383         int err = 0;
1384         u32 config = 0;
1385
1386         while (1) {
1387                 /* find the UBOX device */
1388                 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1389                 if (!ubox_dev)
1390                         break;
1391                 bus = ubox_dev->bus->number;
1392                 /*
1393                  * The nodeid and idmap registers only contain enough
1394                  * information to handle 8 nodes.  On systems with more
1395                  * than 8 nodes, we have to fall back on the NUMA
1396                  * information supplied by the BIOS to determine
1397                  * the topology.
1398                  */
1399                 if (nr_node_ids <= 8) {
1400                         /* get the Node ID of the local register */
1401                         err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1402                         if (err)
1403                                 break;
1404                         nodeid = config & NODE_ID_MASK;
1405                         /* get the Node ID mapping */
1406                         err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1407                         if (err)
1408                                 break;
1409
1410                         segment = pci_domain_nr(ubox_dev->bus);
1411                         raw_spin_lock(&pci2phy_map_lock);
1412                         map = __find_pci2phy_map(segment);
1413                         if (!map) {
1414                                 raw_spin_unlock(&pci2phy_map_lock);
1415                                 err = -ENOMEM;
1416                                 break;
1417                         }
1418
1419                         /*
1420                          * Each 3-bit field in the Node ID mapping register
1421                          * maps to a particular node.
1422                          */
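                        /*
                         * E.g. with config == 0x00000008, the 3-bit field at
                         * bits [5:3] holds 1, so nodeid 1 matches at i == 1.
                         */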
1423                         for (i = 0; i < 8; i++) {
1424                                 if (nodeid == ((config >> (3 * i)) & 0x7)) {
1425                                         if (topology_max_die_per_package() > 1)
1426                                                 die_id = i;
1427                                         else
1428                                                 die_id = topology_phys_to_logical_pkg(i);
1429                                         map->pbus_to_dieid[bus] = die_id;
1430                                         break;
1431                                 }
1432                         }
1433                         raw_spin_unlock(&pci2phy_map_lock);
1434                 } else {
1435                         int node = pcibus_to_node(ubox_dev->bus);
1436                         int cpu;
1437
1438                         segment = pci_domain_nr(ubox_dev->bus);
1439                         raw_spin_lock(&pci2phy_map_lock);
1440                         map = __find_pci2phy_map(segment);
1441                         if (!map) {
1442                                 raw_spin_unlock(&pci2phy_map_lock);
1443                                 err = -ENOMEM;
1444                                 break;
1445                         }
1446
1447                         die_id = -1;
1448                         for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1449                                 struct cpuinfo_x86 *c = &cpu_data(cpu);
1450
1451                                 if (c->initialized && cpu_to_node(cpu) == node) {
1452                                         map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1453                                         break;
1454                                 }
1455                         }
1456                         raw_spin_unlock(&pci2phy_map_lock);
1457
1458                         if (WARN_ON_ONCE(die_id == -1)) {
1459                                 err = -EINVAL;
1460                                 break;
1461                         }
1462                 }
1463         }
1464
1465         if (!err) {
1466                 /*
1467                  * For a PCI bus with no UBOX device, find the next bus
1468                  * that has a UBOX device and use its mapping.
1469                  */
1470                 raw_spin_lock(&pci2phy_map_lock);
1471                 list_for_each_entry(map, &pci2phy_map_head, list) {
1472                         i = -1;
1473                         if (reverse) {
1474                                 for (bus = 255; bus >= 0; bus--) {
1475                                         if (map->pbus_to_dieid[bus] >= 0)
1476                                                 i = map->pbus_to_dieid[bus];
1477                                         else
1478                                                 map->pbus_to_dieid[bus] = i;
1479                                 }
1480                         } else {
1481                                 for (bus = 0; bus <= 255; bus++) {
1482                                         if (map->pbus_to_dieid[bus] >= 0)
1483                                                 i = map->pbus_to_dieid[bus];
1484                                         else
1485                                                 map->pbus_to_dieid[bus] = i;
1486                                 }
1487                         }
1488                 }
1489                 raw_spin_unlock(&pci2phy_map_lock);
1490         }
1491
1492         pci_dev_put(ubox_dev);
1493
1494         return err ? pcibios_err_to_errno(err) : 0;
1495 }
1496
1497 int snbep_uncore_pci_init(void)
1498 {
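        /* 0x3ce0 is the PCI device ID of the SNB-EP UBOX device. */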
1499         int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1500         if (ret)
1501                 return ret;
1502         uncore_pci_uncores = snbep_pci_uncores;
1503         uncore_pci_driver = &snbep_uncore_pci_driver;
1504         return 0;
1505 }
1506 /* end of Sandy Bridge-EP uncore support */
1507
1508 /* IvyTown uncore support */
1509 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1510 {
1511         unsigned msr = uncore_msr_box_ctl(box);
1512         if (msr)
1513                 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1514 }
1515
1516 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1517 {
1518         struct pci_dev *pdev = box->pci_dev;
1519
1520         pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1521 }
1522
1523 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
1524         .init_box       = ivbep_uncore_msr_init_box,            \
1525         .disable_box    = snbep_uncore_msr_disable_box,         \
1526         .enable_box     = snbep_uncore_msr_enable_box,          \
1527         .disable_event  = snbep_uncore_msr_disable_event,       \
1528         .enable_event   = snbep_uncore_msr_enable_event,        \
1529         .read_counter   = uncore_msr_read_counter
1530
1531 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1532         IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1533 };
1534
1535 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1536         .init_box       = ivbep_uncore_pci_init_box,
1537         .disable_box    = snbep_uncore_pci_disable_box,
1538         .enable_box     = snbep_uncore_pci_enable_box,
1539         .disable_event  = snbep_uncore_pci_disable_event,
1540         .enable_event   = snbep_uncore_pci_enable_event,
1541         .read_counter   = snbep_uncore_pci_read_counter,
1542 };
1543
1544 #define IVBEP_UNCORE_PCI_COMMON_INIT()                          \
1545         .perf_ctr       = SNBEP_PCI_PMON_CTR0,                  \
1546         .event_ctl      = SNBEP_PCI_PMON_CTL0,                  \
1547         .event_mask     = IVBEP_PMON_RAW_EVENT_MASK,            \
1548         .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,               \
1549         .ops            = &ivbep_uncore_pci_ops,                \
1550         .format_group   = &ivbep_uncore_format_group
1551
1552 static struct attribute *ivbep_uncore_formats_attr[] = {
1553         &format_attr_event.attr,
1554         &format_attr_umask.attr,
1555         &format_attr_edge.attr,
1556         &format_attr_inv.attr,
1557         &format_attr_thresh8.attr,
1558         NULL,
1559 };
1560
1561 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1562         &format_attr_event.attr,
1563         &format_attr_umask.attr,
1564         &format_attr_edge.attr,
1565         &format_attr_inv.attr,
1566         &format_attr_thresh5.attr,
1567         NULL,
1568 };
1569
1570 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1571         &format_attr_event.attr,
1572         &format_attr_umask.attr,
1573         &format_attr_edge.attr,
1574         &format_attr_tid_en.attr,
1575         &format_attr_thresh8.attr,
1576         &format_attr_filter_tid.attr,
1577         &format_attr_filter_link.attr,
1578         &format_attr_filter_state2.attr,
1579         &format_attr_filter_nid2.attr,
1580         &format_attr_filter_opc2.attr,
1581         &format_attr_filter_nc.attr,
1582         &format_attr_filter_c6.attr,
1583         &format_attr_filter_isoc.attr,
1584         NULL,
1585 };
1586
1587 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1588         &format_attr_event.attr,
1589         &format_attr_occ_sel.attr,
1590         &format_attr_edge.attr,
1591         &format_attr_thresh5.attr,
1592         &format_attr_occ_invert.attr,
1593         &format_attr_occ_edge.attr,
1594         &format_attr_filter_band0.attr,
1595         &format_attr_filter_band1.attr,
1596         &format_attr_filter_band2.attr,
1597         &format_attr_filter_band3.attr,
1598         NULL,
1599 };
1600
1601 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1602         &format_attr_event_ext.attr,
1603         &format_attr_umask.attr,
1604         &format_attr_edge.attr,
1605         &format_attr_thresh8.attr,
1606         &format_attr_match_rds.attr,
1607         &format_attr_match_rnid30.attr,
1608         &format_attr_match_rnid4.attr,
1609         &format_attr_match_dnid.attr,
1610         &format_attr_match_mc.attr,
1611         &format_attr_match_opc.attr,
1612         &format_attr_match_vnw.attr,
1613         &format_attr_match0.attr,
1614         &format_attr_match1.attr,
1615         &format_attr_mask_rds.attr,
1616         &format_attr_mask_rnid30.attr,
1617         &format_attr_mask_rnid4.attr,
1618         &format_attr_mask_dnid.attr,
1619         &format_attr_mask_mc.attr,
1620         &format_attr_mask_opc.attr,
1621         &format_attr_mask_vnw.attr,
1622         &format_attr_mask0.attr,
1623         &format_attr_mask1.attr,
1624         NULL,
1625 };
1626
1627 static const struct attribute_group ivbep_uncore_format_group = {
1628         .name = "format",
1629         .attrs = ivbep_uncore_formats_attr,
1630 };
1631
1632 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1633         .name = "format",
1634         .attrs = ivbep_uncore_ubox_formats_attr,
1635 };
1636
1637 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1638         .name = "format",
1639         .attrs = ivbep_uncore_cbox_formats_attr,
1640 };
1641
1642 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1643         .name = "format",
1644         .attrs = ivbep_uncore_pcu_formats_attr,
1645 };
1646
1647 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1648         .name = "format",
1649         .attrs = ivbep_uncore_qpi_formats_attr,
1650 };
1651
1652 static struct intel_uncore_type ivbep_uncore_ubox = {
1653         .name           = "ubox",
1654         .num_counters   = 2,
1655         .num_boxes      = 1,
1656         .perf_ctr_bits  = 44,
1657         .fixed_ctr_bits = 48,
1658         .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
1659         .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
1660         .event_mask     = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1661         .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1662         .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1663         .ops            = &ivbep_uncore_msr_ops,
1664         .format_group   = &ivbep_uncore_ubox_format_group,
1665 };
1666
1667 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1668         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1669                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1670         SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1671         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1672         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1673         SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1674         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1675         SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1676         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1677         SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1678         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1679         SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1680         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1681         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1682         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1683         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1684         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1685         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1686         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1687         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1688         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1689         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1690         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1691         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1692         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1693         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1694         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1695         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1696         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1697         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1698         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1699         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1700         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1701         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1702         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1703         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1704         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1705         EVENT_EXTRA_END
1706 };
1707
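/*
 * Translate the filter-group bits collected in hw_config (tid, link,
 * state, nid, opc/nc/c6/isoc) into the matching filter-register mask.
 */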
1708 static u64 ivbep_cbox_filter_mask(int fields)
1709 {
1710         u64 mask = 0;
1711
1712         if (fields & 0x1)
1713                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1714         if (fields & 0x2)
1715                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1716         if (fields & 0x4)
1717                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1718         if (fields & 0x8)
1719                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1720         if (fields & 0x10) {
1721                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1722                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1723                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1724                 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1725         }
1726
1727         return mask;
1728 }
1729
1730 static struct event_constraint *
1731 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1732 {
1733         return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1734 }
1735
1736 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1737 {
1738         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1739         struct extra_reg *er;
1740         int idx = 0;
1741
1742         for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1743                 if (er->event != (event->hw.config & er->config_mask))
1744                         continue;
1745                 idx |= er->idx;
1746         }
1747
1748         if (idx) {
1749                 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1750                         SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1751                 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1752                 reg1->idx = idx;
1753         }
1754         return 0;
1755 }
1756
1757 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1758 {
1759         struct hw_perf_event *hwc = &event->hw;
1760         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1761
1762         if (reg1->idx != EXTRA_REG_NONE) {
1763                 u64 filter = uncore_shared_reg_config(box, 0);
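                /* Low half goes to the FILTER MSR, high half to FILTER1 at reg + 6. */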
1764                 wrmsrl(reg1->reg, filter & 0xffffffff);
1765                 wrmsrl(reg1->reg + 6, filter >> 32);
1766         }
1767
1768         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1769 }
1770
1771 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1772         .init_box               = ivbep_uncore_msr_init_box,
1773         .disable_box            = snbep_uncore_msr_disable_box,
1774         .enable_box             = snbep_uncore_msr_enable_box,
1775         .disable_event          = snbep_uncore_msr_disable_event,
1776         .enable_event           = ivbep_cbox_enable_event,
1777         .read_counter           = uncore_msr_read_counter,
1778         .hw_config              = ivbep_cbox_hw_config,
1779         .get_constraint         = ivbep_cbox_get_constraint,
1780         .put_constraint         = snbep_cbox_put_constraint,
1781 };
1782
1783 static struct intel_uncore_type ivbep_uncore_cbox = {
1784         .name                   = "cbox",
1785         .num_counters           = 4,
1786         .num_boxes              = 15,
1787         .perf_ctr_bits          = 44,
1788         .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
1789         .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
1790         .event_mask             = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1791         .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
1792         .msr_offset             = SNBEP_CBO_MSR_OFFSET,
1793         .num_shared_regs        = 1,
1794         .constraints            = snbep_uncore_cbox_constraints,
1795         .ops                    = &ivbep_uncore_cbox_ops,
1796         .format_group           = &ivbep_uncore_cbox_format_group,
1797 };
1798
1799 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1800         IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1801         .hw_config              = snbep_pcu_hw_config,
1802         .get_constraint         = snbep_pcu_get_constraint,
1803         .put_constraint         = snbep_pcu_put_constraint,
1804 };
1805
1806 static struct intel_uncore_type ivbep_uncore_pcu = {
1807         .name                   = "pcu",
1808         .num_counters           = 4,
1809         .num_boxes              = 1,
1810         .perf_ctr_bits          = 48,
1811         .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
1812         .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
1813         .event_mask             = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1814         .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
1815         .num_shared_regs        = 1,
1816         .ops                    = &ivbep_uncore_pcu_ops,
1817         .format_group           = &ivbep_uncore_pcu_format_group,
1818 };
1819
1820 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1821         &ivbep_uncore_ubox,
1822         &ivbep_uncore_cbox,
1823         &ivbep_uncore_pcu,
1824         NULL,
1825 };
1826
1827 void ivbep_uncore_cpu_init(void)
1828 {
1829         if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1830                 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1831         uncore_msr_uncores = ivbep_msr_uncores;
1832 }
1833
1834 static struct intel_uncore_type ivbep_uncore_ha = {
1835         .name           = "ha",
1836         .num_counters   = 4,
1837         .num_boxes      = 2,
1838         .perf_ctr_bits  = 48,
1839         IVBEP_UNCORE_PCI_COMMON_INIT(),
1840 };
1841
1842 static struct intel_uncore_type ivbep_uncore_imc = {
1843         .name           = "imc",
1844         .num_counters   = 4,
1845         .num_boxes      = 8,
1846         .perf_ctr_bits  = 48,
1847         .fixed_ctr_bits = 48,
1848         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1849         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1850         .event_descs    = snbep_uncore_imc_events,
1851         IVBEP_UNCORE_PCI_COMMON_INIT(),
1852 };
1853
1854 /* Registers in IRP boxes are not regularly spaced; use these offset tables. */
1855 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1856 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1857
1858 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1859 {
1860         struct pci_dev *pdev = box->pci_dev;
1861         struct hw_perf_event *hwc = &event->hw;
1862
1863         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1864                                hwc->config | SNBEP_PMON_CTL_EN);
1865 }
1866
1867 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1868 {
1869         struct pci_dev *pdev = box->pci_dev;
1870         struct hw_perf_event *hwc = &event->hw;
1871
1872         pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1873 }
1874
1875 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1876 {
1877         struct pci_dev *pdev = box->pci_dev;
1878         struct hw_perf_event *hwc = &event->hw;
1879         u64 count = 0;
1880
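        /* Assemble the 64-bit count from two 32-bit config-space reads. */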
1881         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1882         pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1883
1884         return count;
1885 }
1886
1887 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1888         .init_box       = ivbep_uncore_pci_init_box,
1889         .disable_box    = snbep_uncore_pci_disable_box,
1890         .enable_box     = snbep_uncore_pci_enable_box,
1891         .disable_event  = ivbep_uncore_irp_disable_event,
1892         .enable_event   = ivbep_uncore_irp_enable_event,
1893         .read_counter   = ivbep_uncore_irp_read_counter,
1894 };
1895
1896 static struct intel_uncore_type ivbep_uncore_irp = {
1897         .name                   = "irp",
1898         .num_counters           = 4,
1899         .num_boxes              = 1,
1900         .perf_ctr_bits          = 48,
1901         .event_mask             = IVBEP_PMON_RAW_EVENT_MASK,
1902         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1903         .ops                    = &ivbep_uncore_irp_ops,
1904         .format_group           = &ivbep_uncore_format_group,
1905 };
1906
1907 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1908         .init_box       = ivbep_uncore_pci_init_box,
1909         .disable_box    = snbep_uncore_pci_disable_box,
1910         .enable_box     = snbep_uncore_pci_enable_box,
1911         .disable_event  = snbep_uncore_pci_disable_event,
1912         .enable_event   = snbep_qpi_enable_event,
1913         .read_counter   = snbep_uncore_pci_read_counter,
1914         .hw_config      = snbep_qpi_hw_config,
1915         .get_constraint = uncore_get_constraint,
1916         .put_constraint = uncore_put_constraint,
1917 };
1918
1919 static struct intel_uncore_type ivbep_uncore_qpi = {
1920         .name                   = "qpi",
1921         .num_counters           = 4,
1922         .num_boxes              = 3,
1923         .perf_ctr_bits          = 48,
1924         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
1925         .event_ctl              = SNBEP_PCI_PMON_CTL0,
1926         .event_mask             = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1927         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
1928         .num_shared_regs        = 1,
1929         .ops                    = &ivbep_uncore_qpi_ops,
1930         .format_group           = &ivbep_uncore_qpi_format_group,
1931 };
1932
1933 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1934         .name           = "r2pcie",
1935         .num_counters   = 4,
1936         .num_boxes      = 1,
1937         .perf_ctr_bits  = 44,
1938         .constraints    = snbep_uncore_r2pcie_constraints,
1939         IVBEP_UNCORE_PCI_COMMON_INIT(),
1940 };
1941
1942 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1943         .name           = "r3qpi",
1944         .num_counters   = 3,
1945         .num_boxes      = 2,
1946         .perf_ctr_bits  = 44,
1947         .constraints    = snbep_uncore_r3qpi_constraints,
1948         IVBEP_UNCORE_PCI_COMMON_INIT(),
1949 };
1950
1951 enum {
1952         IVBEP_PCI_UNCORE_HA,
1953         IVBEP_PCI_UNCORE_IMC,
1954         IVBEP_PCI_UNCORE_IRP,
1955         IVBEP_PCI_UNCORE_QPI,
1956         IVBEP_PCI_UNCORE_R2PCIE,
1957         IVBEP_PCI_UNCORE_R3QPI,
1958 };
1959
1960 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1961         [IVBEP_PCI_UNCORE_HA]   = &ivbep_uncore_ha,
1962         [IVBEP_PCI_UNCORE_IMC]  = &ivbep_uncore_imc,
1963         [IVBEP_PCI_UNCORE_IRP]  = &ivbep_uncore_irp,
1964         [IVBEP_PCI_UNCORE_QPI]  = &ivbep_uncore_qpi,
1965         [IVBEP_PCI_UNCORE_R2PCIE]       = &ivbep_uncore_r2pcie,
1966         [IVBEP_PCI_UNCORE_R3QPI]        = &ivbep_uncore_r3qpi,
1967         NULL,
1968 };
1969
1970 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1971         { /* Home Agent 0 */
1972                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1973                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1974         },
1975         { /* Home Agent 1 */
1976                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1977                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1978         },
1979         { /* MC0 Channel 0 */
1980                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1981                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1982         },
1983         { /* MC0 Channel 1 */
1984                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1985                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1986         },
1987         { /* MC0 Channel 3 */
1988                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1989                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1990         },
1991         { /* MC0 Channel 4 */
1992                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1993                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1994         },
1995         { /* MC1 Channel 0 */
1996                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1997                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1998         },
1999         { /* MC1 Channel 1 */
2000                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2001                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2002         },
2003         { /* MC1 Channel 3 */
2004                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2005                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2006         },
2007         { /* MC1 Channel 4 */
2008                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2009                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2010         },
2011         { /* IRP */
2012                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2013                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2014         },
2015         { /* QPI0 Port 0 */
2016                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2017                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2018         },
2019         { /* QPI0 Port 1 */
2020                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2021                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2022         },
2023         { /* QPI1 Port 2 */
2024                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2025                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2026         },
2027         { /* R2PCIe */
2028                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2029                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2030         },
2031         { /* R3QPI0 Link 0 */
2032                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2033                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2034         },
2035         { /* R3QPI0 Link 1 */
2036                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2037                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2038         },
2039         { /* R3QPI1 Link 2 */
2040                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2041                 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2042         },
2043         { /* QPI Port 0 filter */
2044                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2045                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2046                                                    SNBEP_PCI_QPI_PORT0_FILTER),
2047         },
2048         { /* QPI Port 1 filter */
2049                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2050                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2051                                                    SNBEP_PCI_QPI_PORT1_FILTER),
2052         },
2053         { /* end: all zeroes */ }
2054 };
2055
2056 static struct pci_driver ivbep_uncore_pci_driver = {
2057         .name           = "ivbep_uncore",
2058         .id_table       = ivbep_uncore_pci_ids,
2059 };
2060
2061 int ivbep_uncore_pci_init(void)
2062 {
2063         int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2064         if (ret)
2065                 return ret;
2066         uncore_pci_uncores = ivbep_pci_uncores;
2067         uncore_pci_driver = &ivbep_uncore_pci_driver;
2068         return 0;
2069 }
2070 /* end of IvyTown uncore support */
2071
2072 /* KNL uncore support */
2073 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2074         &format_attr_event.attr,
2075         &format_attr_umask.attr,
2076         &format_attr_edge.attr,
2077         &format_attr_tid_en.attr,
2078         &format_attr_inv.attr,
2079         &format_attr_thresh5.attr,
2080         NULL,
2081 };
2082
2083 static const struct attribute_group knl_uncore_ubox_format_group = {
2084         .name = "format",
2085         .attrs = knl_uncore_ubox_formats_attr,
2086 };
2087
2088 static struct intel_uncore_type knl_uncore_ubox = {
2089         .name                   = "ubox",
2090         .num_counters           = 2,
2091         .num_boxes              = 1,
2092         .perf_ctr_bits          = 48,
2093         .fixed_ctr_bits         = 48,
2094         .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
2095         .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
2096         .event_mask             = KNL_U_MSR_PMON_RAW_EVENT_MASK,
2097         .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2098         .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2099         .ops                    = &snbep_uncore_msr_ops,
2100         .format_group           = &knl_uncore_ubox_format_group,
2101 };
2102
2103 static struct attribute *knl_uncore_cha_formats_attr[] = {
2104         &format_attr_event.attr,
2105         &format_attr_umask.attr,
2106         &format_attr_qor.attr,
2107         &format_attr_edge.attr,
2108         &format_attr_tid_en.attr,
2109         &format_attr_inv.attr,
2110         &format_attr_thresh8.attr,
2111         &format_attr_filter_tid4.attr,
2112         &format_attr_filter_link3.attr,
2113         &format_attr_filter_state4.attr,
2114         &format_attr_filter_local.attr,
2115         &format_attr_filter_all_op.attr,
2116         &format_attr_filter_nnm.attr,
2117         &format_attr_filter_opc3.attr,
2118         &format_attr_filter_nc.attr,
2119         &format_attr_filter_isoc.attr,
2120         NULL,
2121 };
2122
2123 static const struct attribute_group knl_uncore_cha_format_group = {
2124         .name = "format",
2125         .attrs = knl_uncore_cha_formats_attr,
2126 };
2127
2128 static struct event_constraint knl_uncore_cha_constraints[] = {
2129         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2130         UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2131         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2132         EVENT_CONSTRAINT_END
2133 };
2134
2135 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2136         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2137                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2138         SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2139         SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2140         SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2141         EVENT_EXTRA_END
2142 };
2143
2144 static u64 knl_cha_filter_mask(int fields)
2145 {
2146         u64 mask = 0;
2147
2148         if (fields & 0x1)
2149                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2150         if (fields & 0x2)
2151                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2152         if (fields & 0x4)
2153                 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2154         return mask;
2155 }
2156
2157 static struct event_constraint *
2158 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2159 {
2160         return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2161 }
2162
2163 static int knl_cha_hw_config(struct intel_uncore_box *box,
2164                              struct perf_event *event)
2165 {
2166         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2167         struct extra_reg *er;
2168         int idx = 0;
2169
2170         for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2171                 if (er->event != (event->hw.config & er->config_mask))
2172                         continue;
2173                 idx |= er->idx;
2174         }
2175
2176         if (idx) {
2177                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2178                             KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2179                 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2180
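                /* Always let remote-node, local-node and NNC accesses through the filter. */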
2181                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2182                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2183                 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2184                 reg1->idx = idx;
2185         }
2186         return 0;
2187 }
2188
2189 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2190                                     struct perf_event *event);
2191
2192 static struct intel_uncore_ops knl_uncore_cha_ops = {
2193         .init_box               = snbep_uncore_msr_init_box,
2194         .disable_box            = snbep_uncore_msr_disable_box,
2195         .enable_box             = snbep_uncore_msr_enable_box,
2196         .disable_event          = snbep_uncore_msr_disable_event,
2197         .enable_event           = hswep_cbox_enable_event,
2198         .read_counter           = uncore_msr_read_counter,
2199         .hw_config              = knl_cha_hw_config,
2200         .get_constraint         = knl_cha_get_constraint,
2201         .put_constraint         = snbep_cbox_put_constraint,
2202 };
2203
2204 static struct intel_uncore_type knl_uncore_cha = {
2205         .name                   = "cha",
2206         .num_counters           = 4,
2207         .num_boxes              = 38,
2208         .perf_ctr_bits          = 48,
2209         .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
2210         .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
2211         .event_mask             = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2212         .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
2213         .msr_offset             = KNL_CHA_MSR_OFFSET,
2214         .num_shared_regs        = 1,
2215         .constraints            = knl_uncore_cha_constraints,
2216         .ops                    = &knl_uncore_cha_ops,
2217         .format_group           = &knl_uncore_cha_format_group,
2218 };
2219
2220 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2221         &format_attr_event2.attr,
2222         &format_attr_use_occ_ctr.attr,
2223         &format_attr_occ_sel.attr,
2224         &format_attr_edge.attr,
2225         &format_attr_tid_en.attr,
2226         &format_attr_inv.attr,
2227         &format_attr_thresh6.attr,
2228         &format_attr_occ_invert.attr,
2229         &format_attr_occ_edge_det.attr,
2230         NULL,
2231 };
2232
2233 static const struct attribute_group knl_uncore_pcu_format_group = {
2234         .name = "format",
2235         .attrs = knl_uncore_pcu_formats_attr,
2236 };
2237
2238 static struct intel_uncore_type knl_uncore_pcu = {
2239         .name                   = "pcu",
2240         .num_counters           = 4,
2241         .num_boxes              = 1,
2242         .perf_ctr_bits          = 48,
2243         .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
2244         .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
2245         .event_mask             = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2246         .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
2247         .ops                    = &snbep_uncore_msr_ops,
2248         .format_group           = &knl_uncore_pcu_format_group,
2249 };
2250
2251 static struct intel_uncore_type *knl_msr_uncores[] = {
2252         &knl_uncore_ubox,
2253         &knl_uncore_cha,
2254         &knl_uncore_pcu,
2255         NULL,
2256 };
2257
2258 void knl_uncore_cpu_init(void)
2259 {
2260         uncore_msr_uncores = knl_msr_uncores;
2261 }
2262
2263 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2264 {
2265         struct pci_dev *pdev = box->pci_dev;
2266         int box_ctl = uncore_pci_box_ctl(box);
2267
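        /* Writing zero clears the freeze bit, (re)enabling the box. */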
2268         pci_write_config_dword(pdev, box_ctl, 0);
2269 }
2270
2271 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2272                                         struct perf_event *event)
2273 {
2274         struct pci_dev *pdev = box->pci_dev;
2275         struct hw_perf_event *hwc = &event->hw;
2276
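        /*
         * KNL fixed counters have their own enable bit; general-purpose
         * counters use the common SNB-EP enable bit.
         */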
2277         if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2278                                                         == UNCORE_FIXED_EVENT)
2279                 pci_write_config_dword(pdev, hwc->config_base,
2280                                        hwc->config | KNL_PMON_FIXED_CTL_EN);
2281         else
2282                 pci_write_config_dword(pdev, hwc->config_base,
2283                                        hwc->config | SNBEP_PMON_CTL_EN);
2284 }
2285
2286 static struct intel_uncore_ops knl_uncore_imc_ops = {
2287         .init_box       = snbep_uncore_pci_init_box,
2288         .disable_box    = snbep_uncore_pci_disable_box,
2289         .enable_box     = knl_uncore_imc_enable_box,
2290         .read_counter   = snbep_uncore_pci_read_counter,
2291         .enable_event   = knl_uncore_imc_enable_event,
2292         .disable_event  = snbep_uncore_pci_disable_event,
2293 };
2294
2295 static struct intel_uncore_type knl_uncore_imc_uclk = {
2296         .name                   = "imc_uclk",
2297         .num_counters           = 4,
2298         .num_boxes              = 2,
2299         .perf_ctr_bits          = 48,
2300         .fixed_ctr_bits         = 48,
2301         .perf_ctr               = KNL_UCLK_MSR_PMON_CTR0_LOW,
2302         .event_ctl              = KNL_UCLK_MSR_PMON_CTL0,
2303         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
2304         .fixed_ctr              = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2305         .fixed_ctl              = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2306         .box_ctl                = KNL_UCLK_MSR_PMON_BOX_CTL,
2307         .ops                    = &knl_uncore_imc_ops,
2308         .format_group           = &snbep_uncore_format_group,
2309 };
2310
2311 static struct intel_uncore_type knl_uncore_imc_dclk = {
2312         .name                   = "imc",
2313         .num_counters           = 4,
2314         .num_boxes              = 6,
2315         .perf_ctr_bits          = 48,
2316         .fixed_ctr_bits         = 48,
2317         .perf_ctr               = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2318         .event_ctl              = KNL_MC0_CH0_MSR_PMON_CTL0,
2319         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
2320         .fixed_ctr              = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2321         .fixed_ctl              = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2322         .box_ctl                = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2323         .ops                    = &knl_uncore_imc_ops,
2324         .format_group           = &snbep_uncore_format_group,
2325 };
2326
2327 static struct intel_uncore_type knl_uncore_edc_uclk = {
2328         .name                   = "edc_uclk",
2329         .num_counters           = 4,
2330         .num_boxes              = 8,
2331         .perf_ctr_bits          = 48,
2332         .fixed_ctr_bits         = 48,
2333         .perf_ctr               = KNL_UCLK_MSR_PMON_CTR0_LOW,
2334         .event_ctl              = KNL_UCLK_MSR_PMON_CTL0,
2335         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
2336         .fixed_ctr              = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2337         .fixed_ctl              = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2338         .box_ctl                = KNL_UCLK_MSR_PMON_BOX_CTL,
2339         .ops                    = &knl_uncore_imc_ops,
2340         .format_group           = &snbep_uncore_format_group,
2341 };
2342
2343 static struct intel_uncore_type knl_uncore_edc_eclk = {
2344         .name                   = "edc_eclk",
2345         .num_counters           = 4,
2346         .num_boxes              = 8,
2347         .perf_ctr_bits          = 48,
2348         .fixed_ctr_bits         = 48,
2349         .perf_ctr               = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2350         .event_ctl              = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2351         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
2352         .fixed_ctr              = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2353         .fixed_ctl              = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2354         .box_ctl                = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2355         .ops                    = &knl_uncore_imc_ops,
2356         .format_group           = &snbep_uncore_format_group,
2357 };
2358
2359 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2360         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2361         EVENT_CONSTRAINT_END
2362 };
2363
2364 static struct intel_uncore_type knl_uncore_m2pcie = {
2365         .name           = "m2pcie",
2366         .num_counters   = 4,
2367         .num_boxes      = 1,
2368         .perf_ctr_bits  = 48,
2369         .constraints    = knl_uncore_m2pcie_constraints,
2370         SNBEP_UNCORE_PCI_COMMON_INIT(),
2371 };
2372
2373 static struct attribute *knl_uncore_irp_formats_attr[] = {
2374         &format_attr_event.attr,
2375         &format_attr_umask.attr,
2376         &format_attr_qor.attr,
2377         &format_attr_edge.attr,
2378         &format_attr_inv.attr,
2379         &format_attr_thresh8.attr,
2380         NULL,
2381 };
2382
2383 static const struct attribute_group knl_uncore_irp_format_group = {
2384         .name = "format",
2385         .attrs = knl_uncore_irp_formats_attr,
2386 };
2387
2388 static struct intel_uncore_type knl_uncore_irp = {
2389         .name                   = "irp",
2390         .num_counters           = 2,
2391         .num_boxes              = 1,
2392         .perf_ctr_bits          = 48,
2393         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
2394         .event_ctl              = SNBEP_PCI_PMON_CTL0,
2395         .event_mask             = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2396         .box_ctl                = KNL_IRP_PCI_PMON_BOX_CTL,
2397         .ops                    = &snbep_uncore_pci_ops,
2398         .format_group           = &knl_uncore_irp_format_group,
2399 };
2400
2401 enum {
2402         KNL_PCI_UNCORE_MC_UCLK,
2403         KNL_PCI_UNCORE_MC_DCLK,
2404         KNL_PCI_UNCORE_EDC_UCLK,
2405         KNL_PCI_UNCORE_EDC_ECLK,
2406         KNL_PCI_UNCORE_M2PCIE,
2407         KNL_PCI_UNCORE_IRP,
2408 };
2409
2410 static struct intel_uncore_type *knl_pci_uncores[] = {
2411         [KNL_PCI_UNCORE_MC_UCLK]        = &knl_uncore_imc_uclk,
2412         [KNL_PCI_UNCORE_MC_DCLK]        = &knl_uncore_imc_dclk,
2413         [KNL_PCI_UNCORE_EDC_UCLK]       = &knl_uncore_edc_uclk,
2414         [KNL_PCI_UNCORE_EDC_ECLK]       = &knl_uncore_edc_eclk,
2415         [KNL_PCI_UNCORE_M2PCIE]         = &knl_uncore_m2pcie,
2416         [KNL_PCI_UNCORE_IRP]            = &knl_uncore_irp,
2417         NULL,
2418 };
2419
2420 /*
2421  * KNL uses a common PCI device ID for multiple instances of an uncore PMU
2422  * device type. Prior to KNL, each instance of a PMU device type had a unique
2423  * device ID.
2424  *
2425  *      PCI Device ID   Uncore PMU Devices
2426  *      ----------------------------------
2427  *      0x7841          MC0 UClk, MC1 UClk
2428  *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2429  *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2430  *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2431  *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2432  *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2433  *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2434  *      0x7817          M2PCIe
2435  *      0x7814          IRP
2436  */
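/*
 * Instances that share a device ID are told apart by their fixed PCI
 * device and function numbers, encoded below via UNCORE_PCI_DEV_FULL_DATA.
 */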
2437
2438 static const struct pci_device_id knl_uncore_pci_ids[] = {
2439         { /* MC0 UClk */
2440                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2441                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2442         },
2443         { /* MC1 UClk */
2444                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2445                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2446         },
2447         { /* MC0 DClk CH 0 */
2448                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2449                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2450         },
2451         { /* MC0 DClk CH 1 */
2452                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2453                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2454         },
2455         { /* MC0 DClk CH 2 */
2456                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2457                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2458         },
2459         { /* MC1 DClk CH 0 */
2460                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2461                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2462         },
2463         { /* MC1 DClk CH 1 */
2464                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2465                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2466         },
2467         { /* MC1 DClk CH 2 */
2468                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2469                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2470         },
2471         { /* EDC0 UClk */
2472                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2473                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2474         },
2475         { /* EDC1 UClk */
2476                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2477                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2478         },
2479         { /* EDC2 UClk */
2480                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2481                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2482         },
2483         { /* EDC3 UClk */
2484                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2485                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2486         },
2487         { /* EDC4 UClk */
2488                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2489                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2490         },
2491         { /* EDC5 UClk */
2492                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2493                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2494         },
2495         { /* EDC6 UClk */
2496                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2497                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2498         },
2499         { /* EDC7 UClk */
2500                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2501                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2502         },
2503         { /* EDC0 EClk */
2504                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2505                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2506         },
2507         { /* EDC1 EClk */
2508                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2509                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2510         },
2511         { /* EDC2 EClk */
2512                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2513                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2514         },
2515         { /* EDC3 EClk */
2516                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2517                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2518         },
2519         { /* EDC4 EClk */
2520                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2521                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2522         },
2523         { /* EDC5 EClk */
2524                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2525                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2526         },
2527         { /* EDC6 EClk */
2528                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2529                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2530         },
2531         { /* EDC7 EClk */
2532                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2533                 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2534         },
2535         { /* M2PCIe */
2536                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2537                 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2538         },
2539         { /* IRP */
2540                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2541                 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2542         },
2543         { /* end: all zeroes */ }
2544 };
2545
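/*
 * Most entries above use UNCORE_PCI_DEV_FULL_DATA() rather than
 * UNCORE_PCI_DEV_DATA() because several KNL units share a single PCI
 * device ID (all MC DClk channels are 0x7843, all eight EDC UClk units
 * are 0x7833, ...) and can only be distinguished by their fixed
 * device/function numbers, which the _FULL_ variant records as well.
 */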
2546 static struct pci_driver knl_uncore_pci_driver = {
2547         .name           = "knl_uncore",
2548         .id_table       = knl_uncore_pci_ids,
2549 };
2550
2551 int knl_uncore_pci_init(void)
2552 {
2553         int ret;
2554
2555         /* All KNL PCI-based PMON units are on the same PCI bus, except the IRP */
2556         ret = snb_pci2phy_map_init(0x7814); /* IRP */
2557         if (ret)
2558                 return ret;
2559         ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2560         if (ret)
2561                 return ret;
2562         uncore_pci_uncores = knl_pci_uncores;
2563         uncore_pci_driver = &knl_uncore_pci_driver;
2564         return 0;
2565 }
2566
2567 /* end of KNL uncore support */
2568
2569 /* Haswell-EP uncore support */
2570 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2571         &format_attr_event.attr,
2572         &format_attr_umask.attr,
2573         &format_attr_edge.attr,
2574         &format_attr_inv.attr,
2575         &format_attr_thresh5.attr,
2576         &format_attr_filter_tid2.attr,
2577         &format_attr_filter_cid.attr,
2578         NULL,
2579 };
2580
2581 static const struct attribute_group hswep_uncore_ubox_format_group = {
2582         .name = "format",
2583         .attrs = hswep_uncore_ubox_formats_attr,
2584 };
2585
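/*
 * The Ubox has a single box-level filter MSR.  hw_config() below just
 * latches the config1 bits (masked to the valid filter fields) into the
 * event's extra_reg; the common SNB-EP enable path then writes the
 * filter before setting the enable bit.  From user space this is driven
 * through the filter_tid2/filter_cid format attributes, e.g.
 * (illustrative values only):
 *
 *	perf stat -a -e 'uncore_ubox/event=0x11,filter_tid2=0x1/' -- sleep 1
 */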
2586 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2587 {
2588         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2589         reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2590         reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2591         reg1->idx = 0;
2592         return 0;
2593 }
2594
2595 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2596         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2597         .hw_config              = hswep_ubox_hw_config,
2598         .get_constraint         = uncore_get_constraint,
2599         .put_constraint         = uncore_put_constraint,
2600 };
2601
2602 static struct intel_uncore_type hswep_uncore_ubox = {
2603         .name                   = "ubox",
2604         .num_counters           = 2,
2605         .num_boxes              = 1,
2606         .perf_ctr_bits          = 44,
2607         .fixed_ctr_bits         = 48,
2608         .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
2609         .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
2610         .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2611         .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2612         .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2613         .num_shared_regs        = 1,
2614         .ops                    = &hswep_uncore_ubox_ops,
2615         .format_group           = &hswep_uncore_ubox_format_group,
2616 };
2617
2618 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2619         &format_attr_event.attr,
2620         &format_attr_umask.attr,
2621         &format_attr_edge.attr,
2622         &format_attr_tid_en.attr,
2623         &format_attr_thresh8.attr,
2624         &format_attr_filter_tid3.attr,
2625         &format_attr_filter_link2.attr,
2626         &format_attr_filter_state3.attr,
2627         &format_attr_filter_nid2.attr,
2628         &format_attr_filter_opc2.attr,
2629         &format_attr_filter_nc.attr,
2630         &format_attr_filter_c6.attr,
2631         &format_attr_filter_isoc.attr,
2632         NULL,
2633 };
2634
2635 static const struct attribute_group hswep_uncore_cbox_format_group = {
2636         .name = "format",
2637         .attrs = hswep_uncore_cbox_formats_attr,
2638 };
2639
2640 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2641         UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2642         UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2643         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2644         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2645         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2646         UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2647         UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2648         EVENT_CONSTRAINT_END
2649 };
2650
2651 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2652         SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2653                                   SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2654         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2655         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2656         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2657         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2658         SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2659         SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2660         SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2661         SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2662         SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2663         SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2664         SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2665         SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2666         SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2667         SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2668         SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2669         SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2670         SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2671         SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2672         SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2673         SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2674         SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2675         SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2676         SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2677         SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2678         SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2679         SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2680         SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2681         SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2682         SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2683         SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2684         SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2685         SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2686         SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2687         SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2688         SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2689         SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2690         EVENT_EXTRA_END
2691 };
2692
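/*
 * Translate the filter-field bits accumulated in reg1->idx into the
 * mask of valid bits in the C-box filter register: bit 0 = TID,
 * bit 1 = LINK, bit 2 = STATE, bit 3 = NID, and bit 4 covers the
 * opcode-match group (OPC/NC/C6/ISOC), which is programmed as one set.
 * These are the same idx values used by hswep_uncore_cbox_extra_regs[]
 * above.
 */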
2693 static u64 hswep_cbox_filter_mask(int fields)
2694 {
2695         u64 mask = 0;
2696         if (fields & 0x1)
2697                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2698         if (fields & 0x2)
2699                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2700         if (fields & 0x4)
2701                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2702         if (fields & 0x8)
2703                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2704         if (fields & 0x10) {
2705                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2706                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2707                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2708                 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2709         }
2710         return mask;
2711 }
2712
2713 static struct event_constraint *
2714 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2715 {
2716         return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2717 }
2718
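/*
 * Scan the extra-reg table for every entry matching this event and OR
 * together the filter fields it needs.  If any are needed, point the
 * event's extra_reg at this box's FILTER0 MSR and keep only the config1
 * bits that are valid for those fields.
 */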
2719 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2720 {
2721         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2722         struct extra_reg *er;
2723         int idx = 0;
2724
2725         for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2726                 if (er->event != (event->hw.config & er->config_mask))
2727                         continue;
2728                 idx |= er->idx;
2729         }
2730
2731         if (idx) {
2732                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2733                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2734                 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2735                 reg1->idx = idx;
2736         }
2737         return 0;
2738 }
2739
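/*
 * The 64-bit filter value spans two adjacent MSRs (FILTER0/FILTER1), so
 * it is written as two 32-bit halves before the control register is
 * armed with the enable bit.
 */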
2740 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2741                                   struct perf_event *event)
2742 {
2743         struct hw_perf_event *hwc = &event->hw;
2744         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2745
2746         if (reg1->idx != EXTRA_REG_NONE) {
2747                 u64 filter = uncore_shared_reg_config(box, 0);
2748                 wrmsrl(reg1->reg, filter & 0xffffffff);
2749                 wrmsrl(reg1->reg + 1, filter >> 32);
2750         }
2751
2752         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2753 }
2754
2755 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2756         .init_box               = snbep_uncore_msr_init_box,
2757         .disable_box            = snbep_uncore_msr_disable_box,
2758         .enable_box             = snbep_uncore_msr_enable_box,
2759         .disable_event          = snbep_uncore_msr_disable_event,
2760         .enable_event           = hswep_cbox_enable_event,
2761         .read_counter           = uncore_msr_read_counter,
2762         .hw_config              = hswep_cbox_hw_config,
2763         .get_constraint         = hswep_cbox_get_constraint,
2764         .put_constraint         = snbep_cbox_put_constraint,
2765 };
2766
2767 static struct intel_uncore_type hswep_uncore_cbox = {
2768         .name                   = "cbox",
2769         .num_counters           = 4,
2770         .num_boxes              = 18,
2771         .perf_ctr_bits          = 48,
2772         .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
2773         .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
2774         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2775         .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
2776         .msr_offset             = HSWEP_CBO_MSR_OFFSET,
2777         .num_shared_regs        = 1,
2778         .constraints            = hswep_uncore_cbox_constraints,
2779         .ops                    = &hswep_uncore_cbox_ops,
2780         .format_group           = &hswep_uncore_cbox_format_group,
2781 };
2782
2783 /*
2784  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2785  */
2786 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2787 {
2788         unsigned msr = uncore_msr_box_ctl(box);
2789
2790         if (msr) {
2791                 u64 init = SNBEP_PMON_BOX_CTL_INT;
2792                 u64 flags = 0;
2793                 int i;
2794
2795                 for_each_set_bit(i, (unsigned long *)&init, 64) {
2796                         flags |= (1ULL << i);
2797                         wrmsrl(msr, flags);
2798                 }
2799         }
2800 }
2801
2802 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2803         __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2804         .init_box               = hswep_uncore_sbox_msr_init_box,
2805 };
2806
2807 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2808         &format_attr_event.attr,
2809         &format_attr_umask.attr,
2810         &format_attr_edge.attr,
2811         &format_attr_tid_en.attr,
2812         &format_attr_inv.attr,
2813         &format_attr_thresh8.attr,
2814         NULL,
2815 };
2816
2817 static const struct attribute_group hswep_uncore_sbox_format_group = {
2818         .name = "format",
2819         .attrs = hswep_uncore_sbox_formats_attr,
2820 };
2821
2822 static struct intel_uncore_type hswep_uncore_sbox = {
2823         .name                   = "sbox",
2824         .num_counters           = 4,
2825         .num_boxes              = 4,
2826         .perf_ctr_bits          = 44,
2827         .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
2828         .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
2829         .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2830         .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
2831         .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
2832         .ops                    = &hswep_uncore_sbox_msr_ops,
2833         .format_group           = &hswep_uncore_sbox_format_group,
2834 };
2835
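/*
 * PCU events 0xb-0xe are the frequency band cycle events; each of them
 * owns one byte (band 0-3) of the band threshold filter register, taken
 * from config1 below.
 */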
2836 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2837 {
2838         struct hw_perf_event *hwc = &event->hw;
2839         struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2840         int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2841
2842         if (ev_sel >= 0xb && ev_sel <= 0xe) {
2843                 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2844                 reg1->idx = ev_sel - 0xb;
2845                 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
2846         }
2847         return 0;
2848 }
2849
2850 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2851         SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2852         .hw_config              = hswep_pcu_hw_config,
2853         .get_constraint         = snbep_pcu_get_constraint,
2854         .put_constraint         = snbep_pcu_put_constraint,
2855 };
2856
2857 static struct intel_uncore_type hswep_uncore_pcu = {
2858         .name                   = "pcu",
2859         .num_counters           = 4,
2860         .num_boxes              = 1,
2861         .perf_ctr_bits          = 48,
2862         .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
2863         .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
2864         .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2865         .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
2866         .num_shared_regs        = 1,
2867         .ops                    = &hswep_uncore_pcu_ops,
2868         .format_group           = &snbep_uncore_pcu_format_group,
2869 };
2870
2871 static struct intel_uncore_type *hswep_msr_uncores[] = {
2872         &hswep_uncore_ubox,
2873         &hswep_uncore_cbox,
2874         &hswep_uncore_sbox,
2875         &hswep_uncore_pcu,
2876         NULL,
2877 };
2878
2879 #define HSWEP_PCU_DID                   0x2fc0
2880 #define HSWEP_PCU_CAPID4_OFFSET         0x94
2881 #define hswep_get_chop(_cap)            (((_cap) >> 6) & 0x3)
2882
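/*
 * Look at the "chop" fuse bits in the PCU's CAPID4 config register:
 * chop == 0 identifies the die variants that come with fewer (on HSX)
 * or no (on BDX) SBOXes, so the callers below can trim the SBOX list.
 */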
2883 static bool hswep_has_limit_sbox(unsigned int device)
2884 {
2885         struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2886         u32 capid4;
2887
2888         if (!dev)
2889                 return false;
2890
2891         pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
             pci_dev_put(dev);       /* drop the ref taken by pci_get_device() */
2892         if (!hswep_get_chop(capid4))
2893                 return true;
2894
2895         return false;
2896 }
2897
2898 void hswep_uncore_cpu_init(void)
2899 {
2900         if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2901                 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2902
2903         /* Detect 6-8 core systems with only two SBOXes */
2904         if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2905                 hswep_uncore_sbox.num_boxes = 2;
2906
2907         uncore_msr_uncores = hswep_msr_uncores;
2908 }
2909
2910 static struct intel_uncore_type hswep_uncore_ha = {
2911         .name           = "ha",
2912         .num_counters   = 4,
2913         .num_boxes      = 2,
2914         .perf_ctr_bits  = 48,
2915         SNBEP_UNCORE_PCI_COMMON_INIT(),
2916 };
2917
2918 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2919         INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2920         INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2921         INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2922         INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2923         INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2924         INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2925         INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2926         { /* end: all zeroes */ },
2927 };
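/*
 * The cas_count scale converts CAS transactions to MiB: each CAS moves
 * one 64-byte cache line, and 64 / 2^20 = 6.103515625e-5.
 */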
2928
2929 static struct intel_uncore_type hswep_uncore_imc = {
2930         .name           = "imc",
2931         .num_counters   = 4,
2932         .num_boxes      = 8,
2933         .perf_ctr_bits  = 48,
2934         .fixed_ctr_bits = 48,
2935         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2936         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2937         .event_descs    = hswep_uncore_imc_events,
2938         SNBEP_UNCORE_PCI_COMMON_INIT(),
2939 };
2940
2941 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2942
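/*
 * On HSX the IRP counters live in PCI config space at the offsets in
 * hswep_uncore_irp_ctrs[].  The 64-bit count is assembled from two
 * 32-bit config reads: the second read lands in the high half of
 * 'count' via the (u32 *)&count + 1 cast (x86 is little-endian).
 */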
2943 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2944 {
2945         struct pci_dev *pdev = box->pci_dev;
2946         struct hw_perf_event *hwc = &event->hw;
2947         u64 count = 0;
2948
2949         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2950         pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2951
2952         return count;
2953 }
2954
2955 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2956         .init_box       = snbep_uncore_pci_init_box,
2957         .disable_box    = snbep_uncore_pci_disable_box,
2958         .enable_box     = snbep_uncore_pci_enable_box,
2959         .disable_event  = ivbep_uncore_irp_disable_event,
2960         .enable_event   = ivbep_uncore_irp_enable_event,
2961         .read_counter   = hswep_uncore_irp_read_counter,
2962 };
2963
2964 static struct intel_uncore_type hswep_uncore_irp = {
2965         .name                   = "irp",
2966         .num_counters           = 4,
2967         .num_boxes              = 1,
2968         .perf_ctr_bits          = 48,
2969         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
2970         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
2971         .ops                    = &hswep_uncore_irp_ops,
2972         .format_group           = &snbep_uncore_format_group,
2973 };
2974
2975 static struct intel_uncore_type hswep_uncore_qpi = {
2976         .name                   = "qpi",
2977         .num_counters           = 4,
2978         .num_boxes              = 3,
2979         .perf_ctr_bits          = 48,
2980         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
2981         .event_ctl              = SNBEP_PCI_PMON_CTL0,
2982         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2983         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
2984         .num_shared_regs        = 1,
2985         .ops                    = &snbep_uncore_qpi_ops,
2986         .format_group           = &snbep_uncore_qpi_format_group,
2987 };
2988
2989 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2990         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2991         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2992         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2993         UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2994         UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2995         UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2996         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2997         UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2998         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2999         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3000         UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3001         UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3002         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3003         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3004         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3005         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3006         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3007         UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3008         EVENT_CONSTRAINT_END
3009 };
3010
3011 static struct intel_uncore_type hswep_uncore_r2pcie = {
3012         .name           = "r2pcie",
3013         .num_counters   = 4,
3014         .num_boxes      = 1,
3015         .perf_ctr_bits  = 48,
3016         .constraints    = hswep_uncore_r2pcie_constraints,
3017         SNBEP_UNCORE_PCI_COMMON_INIT(),
3018 };
3019
3020 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3021         UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3022         UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3023         UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3024         UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3025         UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3026         UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3027         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3028         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3029         UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3030         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3031         UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3032         UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3033         UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3034         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3035         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3036         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3037         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3038         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3039         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3040         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3041         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3042         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3043         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3044         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3045         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3046         UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3047         UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3048         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3049         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3050         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3051         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3052         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3053         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3054         EVENT_CONSTRAINT_END
3055 };
3056
3057 static struct intel_uncore_type hswep_uncore_r3qpi = {
3058         .name           = "r3qpi",
3059         .num_counters   = 3,
3060         .num_boxes      = 3,
3061         .perf_ctr_bits  = 44,
3062         .constraints    = hswep_uncore_r3qpi_constraints,
3063         SNBEP_UNCORE_PCI_COMMON_INIT(),
3064 };
3065
3066 enum {
3067         HSWEP_PCI_UNCORE_HA,
3068         HSWEP_PCI_UNCORE_IMC,
3069         HSWEP_PCI_UNCORE_IRP,
3070         HSWEP_PCI_UNCORE_QPI,
3071         HSWEP_PCI_UNCORE_R2PCIE,
3072         HSWEP_PCI_UNCORE_R3QPI,
3073 };
3074
3075 static struct intel_uncore_type *hswep_pci_uncores[] = {
3076         [HSWEP_PCI_UNCORE_HA]   = &hswep_uncore_ha,
3077         [HSWEP_PCI_UNCORE_IMC]  = &hswep_uncore_imc,
3078         [HSWEP_PCI_UNCORE_IRP]  = &hswep_uncore_irp,
3079         [HSWEP_PCI_UNCORE_QPI]  = &hswep_uncore_qpi,
3080         [HSWEP_PCI_UNCORE_R2PCIE]       = &hswep_uncore_r2pcie,
3081         [HSWEP_PCI_UNCORE_R3QPI]        = &hswep_uncore_r3qpi,
3082         NULL,
3083 };
3084
3085 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3086         { /* Home Agent 0 */
3087                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3088                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3089         },
3090         { /* Home Agent 1 */
3091                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3092                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3093         },
3094         { /* MC0 Channel 0 */
3095                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3096                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3097         },
3098         { /* MC0 Channel 1 */
3099                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3100                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3101         },
3102         { /* MC0 Channel 2 */
3103                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3104                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3105         },
3106         { /* MC0 Channel 3 */
3107                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3108                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3109         },
3110         { /* MC1 Channel 0 */
3111                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3112                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3113         },
3114         { /* MC1 Channel 1 */
3115                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3116                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3117         },
3118         { /* MC1 Channel 2 */
3119                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3120                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3121         },
3122         { /* MC1 Channel 3 */
3123                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3124                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3125         },
3126         { /* IRP */
3127                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3128                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3129         },
3130         { /* QPI0 Port 0 */
3131                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3132                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3133         },
3134         { /* QPI0 Port 1 */
3135                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3136                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3137         },
3138         { /* QPI1 Port 2 */
3139                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3140                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3141         },
3142         { /* R2PCIe */
3143                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3144                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3145         },
3146         { /* R3QPI0 Link 0 */
3147                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3148                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3149         },
3150         { /* R3QPI0 Link 1 */
3151                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3152                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3153         },
3154         { /* R3QPI1 Link 2 */
3155                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3156                 .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3157         },
3158         { /* QPI Port 0 filter  */
3159                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3160                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3161                                                    SNBEP_PCI_QPI_PORT0_FILTER),
3162         },
3163         { /* QPI Port 1 filter  */
3164                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3165                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3166                                                    SNBEP_PCI_QPI_PORT1_FILTER),
3167         },
3168         { /* end: all zeroes */ }
3169 };
3170
3171 static struct pci_driver hswep_uncore_pci_driver = {
3172         .name           = "hswep_uncore",
3173         .id_table       = hswep_uncore_pci_ids,
3174 };
3175
3176 int hswep_uncore_pci_init(void)
3177 {
3178         int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3179         if (ret)
3180                 return ret;
3181         uncore_pci_uncores = hswep_pci_uncores;
3182         uncore_pci_driver = &hswep_uncore_pci_driver;
3183         return 0;
3184 }
3185 /* end of Haswell-EP uncore support */
3186
3187 /* BDX uncore support */
3188
3189 static struct intel_uncore_type bdx_uncore_ubox = {
3190         .name                   = "ubox",
3191         .num_counters           = 2,
3192         .num_boxes              = 1,
3193         .perf_ctr_bits          = 48,
3194         .fixed_ctr_bits         = 48,
3195         .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
3196         .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
3197         .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3198         .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3199         .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3200         .num_shared_regs        = 1,
3201         .ops                    = &ivbep_uncore_msr_ops,
3202         .format_group           = &ivbep_uncore_ubox_format_group,
3203 };
3204
3205 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3206         UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3207         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3208         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3209         UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3210         EVENT_CONSTRAINT_END
3211 };
3212
3213 static struct intel_uncore_type bdx_uncore_cbox = {
3214         .name                   = "cbox",
3215         .num_counters           = 4,
3216         .num_boxes              = 24,
3217         .perf_ctr_bits          = 48,
3218         .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
3219         .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
3220         .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3221         .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
3222         .msr_offset             = HSWEP_CBO_MSR_OFFSET,
3223         .num_shared_regs        = 1,
3224         .constraints            = bdx_uncore_cbox_constraints,
3225         .ops                    = &hswep_uncore_cbox_ops,
3226         .format_group           = &hswep_uncore_cbox_format_group,
3227 };
3228
3229 static struct intel_uncore_type bdx_uncore_sbox = {
3230         .name                   = "sbox",
3231         .num_counters           = 4,
3232         .num_boxes              = 4,
3233         .perf_ctr_bits          = 48,
3234         .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
3235         .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
3236         .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3237         .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
3238         .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
3239         .ops                    = &hswep_uncore_sbox_msr_ops,
3240         .format_group           = &hswep_uncore_sbox_format_group,
3241 };
3242
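/* Position of bdx_uncore_sbox in bdx_msr_uncores[], so it can be NULLed out below */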
3243 #define BDX_MSR_UNCORE_SBOX     3
3244
3245 static struct intel_uncore_type *bdx_msr_uncores[] = {
3246         &bdx_uncore_ubox,
3247         &bdx_uncore_cbox,
3248         &hswep_uncore_pcu,
3249         &bdx_uncore_sbox,
3250         NULL,
3251 };
3252
3253 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
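/*
 * EVENT_CONSTRAINT(0x80, 0xe, 0x80): any event with bit 7 set in its
 * config is restricted to counters 1-3 (counter mask 0xe).
 */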
3254 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3255         EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3256         EVENT_CONSTRAINT_END
3257 };
3258
3259 #define BDX_PCU_DID                     0x6fc0
3260
3261 void bdx_uncore_cpu_init(void)
3262 {
3263         if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3264                 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3265         uncore_msr_uncores = bdx_msr_uncores;
3266
3267         /* Detect systems with no SBOXes */
3268         if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3269                 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3270
3271         hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3272 }
3273
3274 static struct intel_uncore_type bdx_uncore_ha = {
3275         .name           = "ha",
3276         .num_counters   = 4,
3277         .num_boxes      = 2,
3278         .perf_ctr_bits  = 48,
3279         SNBEP_UNCORE_PCI_COMMON_INIT(),
3280 };
3281
3282 static struct intel_uncore_type bdx_uncore_imc = {
3283         .name           = "imc",
3284         .num_counters   = 4,
3285         .num_boxes      = 8,
3286         .perf_ctr_bits  = 48,
3287         .fixed_ctr_bits = 48,
3288         .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3289         .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3290         .event_descs    = hswep_uncore_imc_events,
3291         SNBEP_UNCORE_PCI_COMMON_INIT(),
3292 };
3293
3294 static struct intel_uncore_type bdx_uncore_irp = {
3295         .name                   = "irp",
3296         .num_counters           = 4,
3297         .num_boxes              = 1,
3298         .perf_ctr_bits          = 48,
3299         .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
3300         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
3301         .ops                    = &hswep_uncore_irp_ops,
3302         .format_group           = &snbep_uncore_format_group,
3303 };
3304
3305 static struct intel_uncore_type bdx_uncore_qpi = {
3306         .name                   = "qpi",
3307         .num_counters           = 4,
3308         .num_boxes              = 3,
3309         .perf_ctr_bits          = 48,
3310         .perf_ctr               = SNBEP_PCI_PMON_CTR0,
3311         .event_ctl              = SNBEP_PCI_PMON_CTL0,
3312         .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3313         .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
3314         .num_shared_regs        = 1,
3315         .ops                    = &snbep_uncore_qpi_ops,
3316         .format_group           = &snbep_uncore_qpi_format_group,
3317 };
3318
3319 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3320         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3321         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3322         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3323         UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3324         UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3325         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3326         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3327         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3328         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3329         EVENT_CONSTRAINT_END
3330 };
3331
3332 static struct intel_uncore_type bdx_uncore_r2pcie = {
3333         .name           = "r2pcie",
3334         .num_counters   = 4,
3335         .num_boxes      = 1,
3336         .perf_ctr_bits  = 48,
3337         .constraints    = bdx_uncore_r2pcie_constraints,
3338         SNBEP_UNCORE_PCI_COMMON_INIT(),
3339 };
3340
3341 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3342         UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3343         UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3344         UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3345         UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3346         UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3347         UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3348         UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3349         UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3350         UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3351         UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3352         UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3353         UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3354         UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3355         UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3356         UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3357         UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3358         UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3359         UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3360         UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3361         UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3362         UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3363         UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3364         UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3365         UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3366         UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3367         UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3368         UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3369         UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3370         UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3371         UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3372         EVENT_CONSTRAINT_END
3373 };
3374
3375 static struct intel_uncore_type bdx_uncore_r3qpi = {
3376         .name           = "r3qpi",
3377         .num_counters   = 3,
3378         .num_boxes      = 3,
3379         .perf_ctr_bits  = 48,
3380         .constraints    = bdx_uncore_r3qpi_constraints,
3381         SNBEP_UNCORE_PCI_COMMON_INIT(),
3382 };
3383
3384 enum {
3385         BDX_PCI_UNCORE_HA,
3386         BDX_PCI_UNCORE_IMC,
3387         BDX_PCI_UNCORE_IRP,
3388         BDX_PCI_UNCORE_QPI,
3389         BDX_PCI_UNCORE_R2PCIE,
3390         BDX_PCI_UNCORE_R3QPI,
3391 };
3392
3393 static struct intel_uncore_type *bdx_pci_uncores[] = {
3394         [BDX_PCI_UNCORE_HA]     = &bdx_uncore_ha,
3395         [BDX_PCI_UNCORE_IMC]    = &bdx_uncore_imc,
3396         [BDX_PCI_UNCORE_IRP]    = &bdx_uncore_irp,
3397         [BDX_PCI_UNCORE_QPI]    = &bdx_uncore_qpi,
3398         [BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
3399         [BDX_PCI_UNCORE_R3QPI]  = &bdx_uncore_r3qpi,
3400         NULL,
3401 };
3402
3403 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3404         { /* Home Agent 0 */
3405                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3406                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3407         },
3408         { /* Home Agent 1 */
3409                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3410                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3411         },
3412         { /* MC0 Channel 0 */
3413                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3414                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3415         },
3416         { /* MC0 Channel 1 */
3417                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3418                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3419         },
3420         { /* MC0 Channel 2 */
3421                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3422                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3423         },
3424         { /* MC0 Channel 3 */
3425                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3426                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3427         },
3428         { /* MC1 Channel 0 */
3429                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3430                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3431         },
3432         { /* MC1 Channel 1 */
3433                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3434                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3435         },
3436         { /* MC1 Channel 2 */
3437                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3438                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3439         },
3440         { /* MC1 Channel 3 */
3441                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3442                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3443         },
3444         { /* IRP */
3445                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3446                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3447         },
3448         { /* QPI0 Port 0 */
3449                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3450                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3451         },
3452         { /* QPI0 Port 1 */
3453                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3454                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3455         },
3456         { /* QPI1 Port 2 */
3457                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3458                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3459         },
3460         { /* R2PCIe */
3461                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3462                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3463         },
3464         { /* R3QPI0 Link 0 */
3465                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3466                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3467         },
3468         { /* R3QPI0 Link 1 */
3469                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3470                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3471         },
3472         { /* R3QPI1 Link 2 */
3473                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3474                 .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3475         },
3476         { /* QPI Port 0 filter  */
3477                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3478                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3479                                                    SNBEP_PCI_QPI_PORT0_FILTER),
3480         },
3481         { /* QPI Port 1 filter  */
3482                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3483                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3484                                                    SNBEP_PCI_QPI_PORT1_FILTER),
3485         },
3486         { /* QPI Port 2 filter  */
3487                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3488                 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3489                                                    BDX_PCI_QPI_PORT2_FILTER),
3490         },
3491         { /* end: all zeroes */ }
3492 };
3493
3494 static struct pci_driver bdx_uncore_pci_driver = {
3495         .name           = "bdx_uncore",
3496         .id_table       = bdx_uncore_pci_ids,
3497 };
3498
3499 int bdx_uncore_pci_init(void)
3500 {
3501         int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3502
3503         if (ret)
3504                 return ret;
3505         uncore_pci_uncores = bdx_pci_uncores;
3506         uncore_pci_driver = &bdx_uncore_pci_driver;
3507         return 0;
3508 }
3509
3510 /* end of BDX uncore support */
3511
3512 /* SKX uncore support */
3513
3514 static struct intel_uncore_type skx_uncore_ubox = {
3515         .name                   = "ubox",
3516         .num_counters           = 2,
3517         .num_boxes              = 1,
3518         .perf_ctr_bits          = 48,
3519         .fixed_ctr_bits         = 48,
3520         .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
3521         .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
3522         .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3523         .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3524         .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3525         .ops                    = &ivbep_uncore_msr_ops,
3526         .format_group           = &ivbep_uncore_ubox_format_group,
3527 };
3528
3529 static struct attribute *skx_uncore_cha_formats_attr[] = {
3530         &format_attr_event.attr,
3531         &format_attr_umask.attr,
3532         &format_attr_edge.attr,
3533         &format_attr_tid_en.attr,
3534         &format_attr_inv.attr,
3535         &format_attr_thresh8.attr,
3536         &format_attr_filter_tid4.attr,
3537         &format_attr_filter_state5.attr,
3538         &format_attr_filter_rem.attr,
3539         &format_attr_filter_loc.attr,
3540         &format_attr_filter_nm.attr,
3541         &format_attr_filter_all_op.attr,
3542         &format_attr_filter_not_nm.attr,
3543         &format_attr_filter_opc_0.attr,
3544         &format_attr_filter_opc_1.attr,
3545         &format_attr_filter_nc.attr,
3546         &format_attr_filter_isoc.attr,
3547         NULL,
3548 };
3549
3550 static const struct attribute_group skx_uncore_chabox_format_group = {
3551         .name = "format",
3552         .attrs = skx_uncore_cha_formats_attr,
3553 };
3554
3555 static struct event_constraint skx_uncore_chabox_constraints[] = {
3556         UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3557         UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3558         EVENT_CONSTRAINT_END
3559 };
3560
3561 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3562         SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3563         SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3564         SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3565         SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3566         SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3567         SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3568         SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3569         SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3570         SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3571         EVENT_EXTRA_END
3572 };
3573
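/*
 * As on HSX, reg1->idx is a bitmask of filter fields: bit 0 = TID,
 * bit 1 = LINK, bit 2 = STATE, while bit 3 selects the whole
 * remote/local/opcode-match group below.
 */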
3574 static u64 skx_cha_filter_mask(int fields)
3575 {
3576         u64 mask = 0;
3577
3578         if (fields & 0x1)
3579                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3580         if (fields & 0x2)
3581                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3582         if (fields & 0x4)
3583                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3584         if (fields & 0x8) {
3585                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3586                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3587                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3588                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3589                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3590                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3591                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3592                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3593                 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3594         }
3595         return mask;
3596 }
3597
3598 static struct event_constraint *
3599 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3600 {
3601         return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3602 }
3603
3604 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3605 {
3606         struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3607         struct extra_reg *er;
3608         int idx = 0;
3609
3610         for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3611                 if (er->event != (event->hw.config & er->config_mask))
3612                         continue;
3613                 idx |= er->idx;
3614         }
3615
3616         if (idx) {
3617                 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3618                             HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3619                 reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3620                 reg1->idx = idx;
3621         }
3622         return 0;
3623 }
3624
3625 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3626         /* There is no frz_en for chabox ctl */
3627         .init_box               = ivbep_uncore_msr_init_box,
3628         .disable_box            = snbep_uncore_msr_disable_box,
3629         .enable_box             = snbep_uncore_msr_enable_box,
3630         .disable_event          = snbep_uncore_msr_disable_event,
3631         .enable_event           = hswep_cbox_enable_event,
3632         .read_counter           = uncore_msr_read_counter,
3633         .hw_config              = skx_cha_hw_config,
3634         .get_constraint         = skx_cha_get_constraint,
3635         .put_constraint         = snbep_cbox_put_constraint,
3636 };
3637
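/*
 * Note that num_boxes is deliberately not set here: the CHA count
 * varies between parts, so it is filled in at CPU init time from the
 * actual topology before the type is registered.
 */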
3638 static struct intel_uncore_type skx_uncore_chabox = {
3639         .name                   = "cha",
3640         .num_counters           = 4,
3641         .perf_ctr_bits          = 48,
3642         .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
3643         .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
3644         .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3645         .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
3646         .msr_offset             = HSWEP_CBO_MSR_OFFSET,
3647         .num_shared_regs        = 1,
3648         .constraints            = skx_uncore_chabox_constraints,
3649         .ops                    = &skx_uncore_chabox_ops,
3650         .format_group           = &skx_uncore_chabox_format_group,
3651 };
3652
3653 static struct attribute *skx_uncore_iio_formats_attr[] = {
3654         &format_attr_event.attr,
3655         &format_attr_umask.attr,
3656         &format_attr_edge.attr,
3657         &format_attr_inv.attr,
3658         &format_attr_thresh9.attr,
3659         &format_attr_ch_mask.attr,
3660         &format_attr_fc_mask.attr,
3661         NULL,
3662 };
3663
3664 static const struct attribute_group skx_uncore_iio_format_group = {
3665         .name = "format",
3666         .attrs = skx_uncore_iio_formats_attr,
3667 };
3668
3669 static struct event_constraint skx_uncore_iio_constraints[] = {
3670         UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3671         UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3672         UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3673         UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3674         UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3675         UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3676         EVENT_CONSTRAINT_END
3677 };
3678
3679 static void skx_iio_enable_event(struct intel_uncore_box *box,
3680                                  struct perf_event *event)
3681 {
3682         struct hw_perf_event *hwc = &event->hw;
3683
3684         wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3685 }
3686
3687 static struct intel_uncore_ops skx_uncore_iio_ops = {
3688         .init_box               = ivbep_uncore_msr_init_box,
3689         .disable_box            = snbep_uncore_msr_disable_box,
3690         .enable_box             = snbep_uncore_msr_enable_box,
3691         .disable_event          = snbep_uncore_msr_disable_event,
3692         .enable_event           = skx_iio_enable_event,
3693         .read_counter           = uncore_msr_read_counter,
3694 };
3695
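/*
 * SKX_MSR_CPU_BUS_NUMBER packs one PCI root bus number per IIO stack,
 * BUS_NUM_STRIDE bits apart.  Shifting by pmu_idx * BUS_NUM_STRIDE and
 * truncating to u8 yields the bus of this PMU's stack on the given die.
 */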
3696 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3697 {
3698         return pmu->type->topology[die].configuration >>
3699                (pmu->pmu_idx * BUS_NUM_STRIDE);
3700 }
3701
3702 static umode_t
3703 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3704                          int die, int zero_bus_pmu)
3705 {
3706         struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3707
3708         return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3709 }
3710
3711 static umode_t
3712 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3713 {
3714         /* Root bus 0x00 is valid only for pmu_idx = 0. */
3715         return pmu_iio_mapping_visible(kobj, attr, die, 0);
3716 }
3717
3718 static ssize_t skx_iio_mapping_show(struct device *dev,
3719                                     struct device_attribute *attr, char *buf)
3720 {
3721         struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3722         struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3723         long die = (long)ea->var;
3724
3725         return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3726                                            skx_iio_stack(pmu, die));
3727 }
3728
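/*
 * Each IIO PMU thus exports one "dieN" attribute per die whose value is
 * the "segment:bus" its stack decodes, e.g. (illustrative output):
 *
 *	$ cat /sys/devices/uncore_iio_0/die0
 *	0000:00
 */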
3729 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3730 {
3731         u64 msr_value;
3732
3733         if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3734                         !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3735                 return -ENXIO;
3736
3737         *topology = msr_value;
3738
3739         return 0;
3740 }
3741
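/*
 * Return any online CPU on the given logical die (the first one found),
 * falling back to CPU 0 if the die has no online CPUs.
 */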
3742 static int die_to_cpu(int die)
3743 {
3744         int res = 0, cpu, current_die;
3745         /*
3746          * Hold cpus_read_lock() so that a CPU seen in cpu_online_mask
3747          * cannot go offline while we are looking at it.
3748          */
3749         cpus_read_lock();
3750         for_each_online_cpu(cpu) {
3751                 current_die = topology_logical_die_id(cpu);
3752                 if (current_die == die) {
3753                         res = cpu;
3754                         break;
3755                 }
3756         }
3757         cpus_read_unlock();
3758         return res;
3759 }
3760
3761 static int skx_iio_get_topology(struct intel_uncore_type *type)
3762 {
3763         int die, ret = -EPERM;
3764
3765         type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3766                                  GFP_KERNEL);
3767         if (!type->topology)
3768                 return -ENOMEM;
3769
3770         for (die = 0; die < uncore_max_dies(); die++) {
3771                 ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3772                                            &type->topology[die].configuration);
3773                 if (ret)
3774                         break;
3775
3776                 ret = uncore_die_to_segment(die);
3777                 if (ret < 0)
3778                         break;
3779
3780                 type->topology[die].segment = ret;
3781         }
3782
3783         if (ret < 0) {
3784                 kfree(type->topology);
3785                 type->topology = NULL;
3786         }
3787
3788         return ret;
3789 }
3790
3791 static struct attribute_group skx_iio_mapping_group = {
3792         .is_visible     = skx_iio_mapping_visible,
3793 };
3794
3795 static const struct attribute_group *skx_iio_attr_update[] = {
3796         &skx_iio_mapping_group,
3797         NULL,
3798 };
3799
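/*
 * Build the per-die "dieN" mapping attributes for an IIO-style PMU:
 * query the platform topology, then allocate one dev_ext_attribute per
 * die (plus a NULL terminator) wired to skx_iio_mapping_show().  On any
 * failure everything is unwound and attr_update is cleared, so the PMU
 * simply comes up without the mapping files.
 */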
static int
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
        char buf[64];
        int ret;
        long die = -1;
        struct attribute **attrs = NULL;
        struct dev_ext_attribute *eas = NULL;

        ret = type->get_topology(type);
        if (ret < 0)
                goto clear_attr_update;

        ret = -ENOMEM;

        /* One more for NULL. */
        attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                goto err;

        eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
        if (!eas)
                goto err;

        for (die = 0; die < uncore_max_dies(); die++) {
                sprintf(buf, "die%ld", die);
                sysfs_attr_init(&eas[die].attr.attr);
                eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
                if (!eas[die].attr.attr.name)
                        goto err;
                eas[die].attr.attr.mode = 0444;
                eas[die].attr.show = skx_iio_mapping_show;
                eas[die].attr.store = NULL;
                eas[die].var = (void *)die;
                attrs[die] = &eas[die].attr.attr;
        }
        ag->attrs = attrs;

        return 0;
err:
        for (; die >= 0; die--)
                kfree(eas[die].attr.attr.name);
        kfree(eas);
        kfree(attrs);
        kfree(type->topology);
clear_attr_update:
        type->attr_update = NULL;
        return ret;
}

static int skx_iio_set_mapping(struct intel_uncore_type *type)
{
        return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
}

static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
        struct attribute **attr = skx_iio_mapping_group.attrs;

        if (!attr)
                return;

        for (; *attr; attr++)
                kfree((*attr)->name);
        kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
        kfree(skx_iio_mapping_group.attrs);
        skx_iio_mapping_group.attrs = NULL;
        kfree(type->topology);
}

static struct intel_uncore_type skx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SKX_IIO0_MSR_PMON_CTL0,
        .perf_ctr               = SKX_IIO0_MSR_PMON_CTR0,
        .event_mask             = SKX_IIO_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
        .box_ctl                = SKX_IIO0_MSR_PMON_BOX_CTL,
        .msr_offset             = SKX_IIO_MSR_OFFSET,
        .constraints            = skx_uncore_iio_constraints,
        .ops                    = &skx_uncore_iio_ops,
        .format_group           = &skx_uncore_iio_format_group,
        .attr_update            = skx_iio_attr_update,
        .get_topology           = skx_iio_get_topology,
        .set_mapping            = skx_iio_set_mapping,
        .cleanup_mapping        = skx_iio_cleanup_mapping,
};

enum perf_uncore_iio_freerunning_type_id {
        SKX_IIO_MSR_IOCLK                       = 0,
        SKX_IIO_MSR_BW                          = 1,
        SKX_IIO_MSR_UTIL                        = 2,

        SKX_IIO_FREERUNNING_TYPE_MAX,
};

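/*
 * Initializer layout below: { counter base MSR, counter offset, box offset,
 * number of counters, counter width in bits }, matching the field order of
 * struct freerunning_counters in uncore.h.
 */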
static struct freerunning_counters skx_iio_freerunning[] = {
        [SKX_IIO_MSR_IOCLK]     = { 0xa45, 0x1, 0x20, 1, 36 },
        [SKX_IIO_MSR_BW]        = { 0xb00, 0x1, 0x10, 8, 36 },
        [SKX_IIO_MSR_UTIL]      = { 0xb08, 0x1, 0x10, 8, 36 },
};

static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
        /* Free-Running IO CLOCKS Counter */
        INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
        /* Free-Running IIO BANDWIDTH Counters */
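        /*
         * The bandwidth scale 3.814697266e-6 is 2^-18; with the MiB unit
         * (2^20 bytes) each raw count is therefore reported as 4 bytes,
         * i.e. these counters appear to tick once per 4 bytes transferred.
         */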
        INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x24"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x25"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x26"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x27"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
        /* Free-running IIO UTILIZATION Counters */
        INTEL_UNCORE_EVENT_DESC(util_in_port0,          "event=0xff,umask=0x30"),
        INTEL_UNCORE_EVENT_DESC(util_out_port0,         "event=0xff,umask=0x31"),
        INTEL_UNCORE_EVENT_DESC(util_in_port1,          "event=0xff,umask=0x32"),
        INTEL_UNCORE_EVENT_DESC(util_out_port1,         "event=0xff,umask=0x33"),
        INTEL_UNCORE_EVENT_DESC(util_in_port2,          "event=0xff,umask=0x34"),
        INTEL_UNCORE_EVENT_DESC(util_out_port2,         "event=0xff,umask=0x35"),
        INTEL_UNCORE_EVENT_DESC(util_in_port3,          "event=0xff,umask=0x36"),
        INTEL_UNCORE_EVENT_DESC(util_out_port3,         "event=0xff,umask=0x37"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
        .name = "format",
        .attrs = skx_uncore_iio_freerunning_formats_attr,
};

static struct intel_uncore_type skx_uncore_iio_free_running = {
        .name                   = "iio_free_running",
        .num_counters           = 17,
        .num_boxes              = 6,
        .num_freerunning_types  = SKX_IIO_FREERUNNING_TYPE_MAX,
        .freerunning            = skx_iio_freerunning,
        .ops                    = &skx_uncore_iio_freerunning_ops,
        .event_descs            = skx_uncore_iio_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};

static struct attribute *skx_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group skx_uncore_format_group = {
        .name = "format",
        .attrs = skx_uncore_formats_attr,
};

static struct intel_uncore_type skx_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SKX_IRP0_MSR_PMON_CTL0,
        .perf_ctr               = SKX_IRP0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SKX_IRP0_MSR_PMON_BOX_CTL,
        .msr_offset             = SKX_IRP_MSR_OFFSET,
        .ops                    = &skx_uncore_iio_ops,
        .format_group           = &skx_uncore_format_group,
};

static struct attribute *skx_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge_det.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
        .name = "format",
        .attrs = skx_uncore_pcu_formats_attr,
};

static struct intel_uncore_ops skx_uncore_pcu_ops = {
        IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = hswep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type skx_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &skx_uncore_pcu_ops,
        .format_group           = &skx_uncore_pcu_format_group,
};

static struct intel_uncore_type *skx_msr_uncores[] = {
        &skx_uncore_ubox,
        &skx_uncore_chabox,
        &skx_uncore_iio,
        &skx_uncore_iio_free_running,
        &skx_uncore_irp,
        &skx_uncore_pcu,
        NULL,
};

/*
 * The number of CHAs is determined by reading bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C on the
 * device with PCI ID 0x2083. Each set bit corresponds to an enabled CHA.
 */
#define SKX_CAPID6              0x9c
#define SKX_CHA_BIT_MASK        GENMASK(27, 0)

static int skx_count_chabox(void)
{
        struct pci_dev *dev = NULL;
        u32 val = 0;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
        if (!dev)
                goto out;

        pci_read_config_dword(dev, SKX_CAPID6, &val);
        val &= SKX_CHA_BIT_MASK;
out:
        pci_dev_put(dev);
        return hweight32(val);
}

void skx_uncore_cpu_init(void)
{
        skx_uncore_chabox.num_boxes = skx_count_chabox();
        uncore_msr_uncores = skx_msr_uncores;
}

static struct intel_uncore_type skx_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 6,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};

static struct attribute *skx_upi_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
        .name = "format",
        .attrs = skx_upi_uncore_formats_attr,
};

static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
        pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
        .init_box       = skx_upi_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_upi = {
        .name           = "upi",
        .num_counters   = 4,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SKX_UPI_PCI_PMON_CTR0,
        .event_ctl      = SKX_UPI_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
        .box_ctl        = SKX_UPI_PCI_PMON_BOX_CTL,
        .ops            = &skx_upi_uncore_pci_ops,
        .format_group   = &skx_upi_uncore_format_group,
};

static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;

        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
        pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
        .init_box       = skx_m2m_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type skx_uncore_m2m = {
        .name           = "m2m",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SKX_M2M_PCI_PMON_CTR0,
        .event_ctl      = SKX_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SKX_M2M_PCI_PMON_BOX_CTL,
        .ops            = &skx_m2m_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m2pcie = {
        .name           = "m2pcie",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .constraints    = skx_uncore_m2pcie_constraints,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};

static struct event_constraint skx_uncore_m3upi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type skx_uncore_m3upi = {
        .name           = "m3upi",
        .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .constraints    = skx_uncore_m3upi_constraints,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};

enum {
        SKX_PCI_UNCORE_IMC,
        SKX_PCI_UNCORE_M2M,
        SKX_PCI_UNCORE_UPI,
        SKX_PCI_UNCORE_M2PCIE,
        SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
        [SKX_PCI_UNCORE_IMC]    = &skx_uncore_imc,
        [SKX_PCI_UNCORE_M2M]    = &skx_uncore_m2m,
        [SKX_PCI_UNCORE_UPI]    = &skx_uncore_upi,
        [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
        [SKX_PCI_UNCORE_M3UPI]  = &skx_uncore_m3upi,
        NULL,
};

static const struct pci_device_id skx_uncore_pci_ids[] = {
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
        },
        { /* M2M0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
        },
        { /* M2M1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
        },
        { /* UPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
        },
        { /* UPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
        },
        { /* UPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
        },
        { /* M2PCIe 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
        },
        { /* M2PCIe 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
        },
        { /* M2PCIe 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
        },
        { /* M2PCIe 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
        },
        { /* M3UPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
        },
        { /* M3UPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
        },
        { /* M3UPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver skx_uncore_pci_driver = {
        .name           = "skx_uncore",
        .id_table       = skx_uncore_pci_ids,
};

int skx_uncore_pci_init(void)
{
        /* need to double check pci address */
        int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

        if (ret)
                return ret;

        uncore_pci_uncores = skx_pci_uncores;
        uncore_pci_driver = &skx_uncore_pci_driver;
        return 0;
}

/* end of SKX uncore support */

/* SNR uncore support */

static struct intel_uncore_type snr_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = SNR_U_MSR_PMON_CTR0,
        .event_ctl              = SNR_U_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &ivbep_uncore_format_group,
};

static struct attribute *snr_uncore_cha_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext2.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid5.attr,
        NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
        .name = "format",
        .attrs = snr_uncore_cha_formats_attr,
};

static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
                    box->pmu->type->msr_offset * box->pmu->pmu_idx;
        reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
        reg1->idx = 0;

        return 0;
}

static void snr_cha_enable_event(struct intel_uncore_box *box,
                                   struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, reg1->config);

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snr_uncore_chabox_ops = {
        .init_box               = ivbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = snr_cha_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = snr_cha_hw_config,
};

static struct intel_uncore_type snr_uncore_chabox = {
        .name                   = "cha",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SNR_CHA_MSR_PMON_CTL0,
        .perf_ctr               = SNR_CHA_MSR_PMON_CTR0,
        .box_ctl                = SNR_CHA_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_CBO_MSR_OFFSET,
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SNR_CHA_RAW_EVENT_MASK_EXT,
        .ops                    = &snr_uncore_chabox_ops,
        .format_group           = &snr_uncore_chabox_format_group,
};

static struct attribute *snr_uncore_iio_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh9.attr,
        &format_attr_ch_mask2.attr,
        &format_attr_fc_mask2.attr,
        NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
        .name = "format",
        .attrs = snr_uncore_iio_formats_attr,
};

static umode_t
snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
        /* Root bus 0x00 is valid only for pmu_idx = 1. */
        return pmu_iio_mapping_visible(kobj, attr, die, 1);
}

static struct attribute_group snr_iio_mapping_group = {
        .is_visible     = snr_iio_mapping_visible,
};

static const struct attribute_group *snr_iio_attr_update[] = {
        &snr_iio_mapping_group,
        NULL,
};

static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
{
        u32 sad_cfg;
        int die, stack_id, ret = -EPERM;
        struct pci_dev *dev = NULL;

        type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
                                 GFP_KERNEL);
        if (!type->topology)
                return -ENOMEM;

        while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
                ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
                if (ret) {
                        ret = pcibios_err_to_errno(ret);
                        break;
                }

                die = uncore_pcibus_to_dieid(dev->bus);
                stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
                if (die < 0 || stack_id >= type->num_boxes) {
                        ret = -EPERM;
                        break;
                }

                /* Convert stack id from SAD_CONTROL to PMON notation. */
                stack_id = sad_pmon_mapping[stack_id];

                ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
                type->topology[die].segment = pci_domain_nr(dev->bus);
        }

        if (ret) {
                kfree(type->topology);
                type->topology = NULL;
        }

        return ret;
}

/*
 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to
 * PMON notation; e.g. SAD stack 0 (the CBDMA/DMI stack) is enumerated by
 * the PMON as box 1.
 */
enum {
        SNR_QAT_PMON_ID,
        SNR_CBDMA_DMI_PMON_ID,
        SNR_NIS_PMON_ID,
        SNR_DLB_PMON_ID,
        SNR_PCIE_GEN3_PMON_ID
};

static u8 snr_sad_pmon_mapping[] = {
        SNR_CBDMA_DMI_PMON_ID,
        SNR_PCIE_GEN3_PMON_ID,
        SNR_DLB_PMON_ID,
        SNR_NIS_PMON_ID,
        SNR_QAT_PMON_ID
};

static int snr_iio_get_topology(struct intel_uncore_type *type)
{
        return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
}

static int snr_iio_set_mapping(struct intel_uncore_type *type)
{
        return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}

static struct intel_uncore_type snr_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
        .num_boxes              = 5,
        .perf_ctr_bits          = 48,
        .event_ctl              = SNR_IIO_MSR_PMON_CTL0,
        .perf_ctr               = SNR_IIO_MSR_PMON_CTR0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
        .box_ctl                = SNR_IIO_MSR_PMON_BOX_CTL,
        .msr_offset             = SNR_IIO_MSR_OFFSET,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &snr_uncore_iio_format_group,
        .attr_update            = snr_iio_attr_update,
        .get_topology           = snr_iio_get_topology,
        .set_mapping            = snr_iio_set_mapping,
        .cleanup_mapping        = skx_iio_cleanup_mapping,
};

static struct intel_uncore_type snr_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
        .num_boxes              = 5,
        .perf_ctr_bits          = 48,
        .event_ctl              = SNR_IRP0_MSR_PMON_CTL0,
        .perf_ctr               = SNR_IRP0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNR_IRP0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNR_IRP_MSR_OFFSET,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &ivbep_uncore_format_group,
};

static struct intel_uncore_type snr_uncore_m2pcie = {
        .name           = "m2pcie",
        .num_counters   = 4,
        .num_boxes      = 5,
        .perf_ctr_bits  = 48,
        .event_ctl      = SNR_M2PCIE_MSR_PMON_CTL0,
        .perf_ctr       = SNR_M2PCIE_MSR_PMON_CTR0,
        .box_ctl        = SNR_M2PCIE_MSR_PMON_BOX_CTL,
        .msr_offset     = SNR_M2PCIE_MSR_OFFSET,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .ops            = &ivbep_uncore_msr_ops,
        .format_group   = &ivbep_uncore_format_group,
};

static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                reg1->config = event->attr.config1 & (0xff << reg1->idx);
        }
        return 0;
}

static struct intel_uncore_ops snr_uncore_pcu_ops = {
        IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snr_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

static struct intel_uncore_type snr_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNR_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNR_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNR_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snr_uncore_pcu_ops,
        .format_group           = &skx_uncore_pcu_format_group,
};

enum perf_uncore_snr_iio_freerunning_type_id {
        SNR_IIO_MSR_IOCLK,
        SNR_IIO_MSR_BW_IN,

        SNR_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_iio_freerunning[] = {
        [SNR_IIO_MSR_IOCLK]     = { 0x1eac, 0x1, 0x10, 1, 48 },
        [SNR_IIO_MSR_BW_IN]     = { 0x1f00, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
        /* Free-Running IIO CLOCKS Counter */
        INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
        /* Free-Running IIO BANDWIDTH IN Counters */
        INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4,            "event=0xff,umask=0x24"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5,            "event=0xff,umask=0x25"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6,            "event=0xff,umask=0x26"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7,            "event=0xff,umask=0x27"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,       "MiB"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_iio_free_running = {
        .name                   = "iio_free_running",
        .num_counters           = 9,
        .num_boxes              = 5,
        .num_freerunning_types  = SNR_IIO_FREERUNNING_TYPE_MAX,
        .freerunning            = snr_iio_freerunning,
        .ops                    = &skx_uncore_iio_freerunning_ops,
        .event_descs            = snr_uncore_iio_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_msr_uncores[] = {
        &snr_uncore_ubox,
        &snr_uncore_chabox,
        &snr_uncore_iio,
        &snr_uncore_irp,
        &snr_uncore_m2pcie,
        &snr_uncore_pcu,
        &snr_uncore_iio_free_running,
        NULL,
};

void snr_uncore_cpu_init(void)
{
        uncore_msr_uncores = snr_msr_uncores;
}

static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);

        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
        pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
        .init_box       = snr_m2m_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

static struct attribute *snr_m2m_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext3.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
        .name = "format",
        .attrs = snr_m2m_uncore_formats_attr,
};

static struct intel_uncore_type snr_uncore_m2m = {
        .name           = "m2m",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SNR_M2M_PCI_PMON_CTR0,
        .event_ctl      = SNR_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
        .box_ctl        = SNR_M2M_PCI_PMON_BOX_CTL,
        .ops            = &snr_m2m_uncore_pci_ops,
        .format_group   = &snr_m2m_uncore_format_group,
};

static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
        pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}

static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
        .init_box       = snr_m2m_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snr_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type snr_uncore_pcie3 = {
        .name           = "pcie3",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SNR_PCIE3_PCI_PMON_CTR0,
        .event_ctl      = SNR_PCIE3_PCI_PMON_CTL0,
        .event_mask     = SKX_IIO_PMON_RAW_EVENT_MASK,
        .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
        .box_ctl        = SNR_PCIE3_PCI_PMON_BOX_CTL,
        .ops            = &snr_pcie3_uncore_pci_ops,
        .format_group   = &skx_uncore_iio_format_group,
};

enum {
        SNR_PCI_UNCORE_M2M,
        SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
        [SNR_PCI_UNCORE_M2M]            = &snr_uncore_m2m,
        [SNR_PCI_UNCORE_PCIE3]          = &snr_uncore_pcie3,
        NULL,
};

static const struct pci_device_id snr_uncore_pci_ids[] = {
        { /* M2M */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
        .name           = "snr_uncore",
        .id_table       = snr_uncore_pci_ids,
};

static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
        { /* PCIe3 RP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
        .name           = "snr_uncore_sub",
        .id_table       = snr_uncore_pci_sub_ids,
};

int snr_uncore_pci_init(void)
{
        /* SNR UBOX DID */
        int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
                                         SKX_GIDNIDMAP, true);

        if (ret)
                return ret;

        uncore_pci_uncores = snr_pci_uncores;
        uncore_pci_driver = &snr_uncore_pci_driver;
        uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
        return 0;
}

#define SNR_MC_DEVICE_ID        0x3451

static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
{
        struct pci_dev *mc_dev = NULL;
        int pkg;

        while (1) {
                mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
                if (!mc_dev)
                        break;
                pkg = uncore_pcibus_to_dieid(mc_dev->bus);
                if (pkg == id)
                        break;
        }
        return mc_dev;
}

static int snr_uncore_mmio_map(struct intel_uncore_box *box,
                               unsigned int box_ctl, int mem_offset,
                               unsigned int device)
{
        struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
        struct intel_uncore_type *type = box->pmu->type;
        resource_size_t addr;
        u32 pci_dword;

        if (!pdev)
                return -ENODEV;

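        /*
         * Assemble the iMC PMON MMIO address: the base register supplies
         * the address bits from bit 23 upward and the per-channel memory
         * offset register contributes bits from bit 12 upward, as the
         * shifts below encode; box_ctl is then the PMON block's offset
         * within that window.
         */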
        pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
        addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

        pci_read_config_dword(pdev, mem_offset, &pci_dword);
        addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

        addr += box_ctl;

        box->io_addr = ioremap(addr, type->mmio_map_size);
        if (!box->io_addr) {
                pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
                return -EINVAL;
        }

        return 0;
}

static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
                                       unsigned int box_ctl, int mem_offset,
                                       unsigned int device)
{
        if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
                writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
        __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
                                   SNR_IMC_MMIO_MEM0_OFFSET,
                                   SNR_MC_DEVICE_ID);
}

static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
        u32 config;

        if (!box->io_addr)
                return;

        config = readl(box->io_addr);
        config |= SNBEP_PMON_BOX_CTL_FRZ;
        writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
        u32 config;

        if (!box->io_addr)
                return;

        config = readl(box->io_addr);
        config &= ~SNBEP_PMON_BOX_CTL_FRZ;
        writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
                                           struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!box->io_addr)
                return;

        if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
                return;

        writel(hwc->config | SNBEP_PMON_CTL_EN,
               box->io_addr + hwc->config_base);
}

static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
                                            struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!box->io_addr)
                return;

        if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
                return;

        writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops snr_uncore_mmio_ops = {
        .init_box       = snr_uncore_mmio_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .disable_box    = snr_uncore_mmio_disable_box,
        .enable_box     = snr_uncore_mmio_enable_box,
        .disable_event  = snr_uncore_mmio_disable_event,
        .enable_event   = snr_uncore_mmio_enable_event,
        .read_counter   = uncore_mmio_read_counter,
};

static struct uncore_event_desc snr_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
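        /*
         * The CAS count scale 6.103515625e-5 is 2^-14; with the MiB unit
         * each CAS command is reported as 64 bytes, i.e. one full cache
         * line per read or write CAS.
         */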
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_type snr_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNR_IMC_MMIO_PMON_FIXED_CTR,
        .fixed_ctl      = SNR_IMC_MMIO_PMON_FIXED_CTL,
        .event_descs    = snr_uncore_imc_events,
        .perf_ctr       = SNR_IMC_MMIO_PMON_CTR0,
        .event_ctl      = SNR_IMC_MMIO_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNR_IMC_MMIO_PMON_BOX_CTL,
        .mmio_offset    = SNR_IMC_MMIO_OFFSET,
        .mmio_map_size  = SNR_IMC_MMIO_SIZE,
        .ops            = &snr_uncore_mmio_ops,
        .format_group   = &skx_uncore_format_group,
};

enum perf_uncore_snr_imc_freerunning_type_id {
        SNR_IMC_DCLK,
        SNR_IMC_DDR,

        SNR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_imc_freerunning[] = {
        [SNR_IMC_DCLK]  = { 0x22b0, 0x0, 0, 1, 48 },
        [SNR_IMC_DDR]   = { 0x2290, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
        INTEL_UNCORE_EVENT_DESC(dclk,           "event=0xff,umask=0x10"),

        INTEL_UNCORE_EVENT_DESC(read,           "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(read.scale,     "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(read.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(write,          "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(write.scale,    "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(write.unit,     "MiB"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
        .init_box       = snr_uncore_mmio_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .read_counter   = uncore_mmio_read_counter,
        .hw_config      = uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
        .name                   = "imc_free_running",
        .num_counters           = 3,
        .num_boxes              = 1,
        .num_freerunning_types  = SNR_IMC_FREERUNNING_TYPE_MAX,
        .mmio_map_size          = SNR_IMC_MMIO_SIZE,
        .freerunning            = snr_imc_freerunning,
        .ops                    = &snr_uncore_imc_freerunning_ops,
        .event_descs            = snr_uncore_imc_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
        &snr_uncore_imc,
        &snr_uncore_imc_free_running,
        NULL,
};

void snr_uncore_mmio_init(void)
{
        uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

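/*
 * Per-CHA MSR offsets, indexed by pmu_idx and applied relative to the
 * ICX_C34_MSR_PMON_* base registers (hence the 0x0 entry at index 34).
 */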
static unsigned icx_cha_msr_offsets[] = {
        0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
        0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
        0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
        0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
        0x1c,  0x2a,  0x38,  0x46,
};

static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

        if (tie_en) {
                reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
                            icx_cha_msr_offsets[box->pmu->pmu_idx];
                reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
                reg1->idx = 0;
        }

        return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
        .init_box               = ivbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = snr_cha_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = icx_cha_hw_config,
};

static struct intel_uncore_type icx_uncore_chabox = {
        .name                   = "cha",
        .num_counters           = 4,
        .perf_ctr_bits          = 48,
        .event_ctl              = ICX_C34_MSR_PMON_CTL0,
        .perf_ctr               = ICX_C34_MSR_PMON_CTR0,
        .box_ctl                = ICX_C34_MSR_PMON_BOX_CTL,
        .msr_offsets            = icx_cha_msr_offsets,
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SNR_CHA_RAW_EVENT_MASK_EXT,
        .constraints            = skx_uncore_chabox_constraints,
        .ops                    = &icx_uncore_chabox_ops,
        .format_group           = &snr_uncore_chabox_format_group,
};

static unsigned icx_msr_offsets[] = {
        0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
        UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
        EVENT_CONSTRAINT_END
};

static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
        /* Root bus 0x00 is valid only for pmu_idx = 5. */
        return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

static struct attribute_group icx_iio_mapping_group = {
        .is_visible     = icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
        &icx_iio_mapping_group,
        NULL,
};

/*
 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to
 * PMON notation; e.g. SAD stack 0 (the CBDMA/DMI stack) is enumerated by
 * the PMON as box 5.
 */
enum {
        ICX_PCIE1_PMON_ID,
        ICX_PCIE2_PMON_ID,
        ICX_PCIE3_PMON_ID,
        ICX_PCIE4_PMON_ID,
        ICX_PCIE5_PMON_ID,
        ICX_CBDMA_DMI_PMON_ID
};

static u8 icx_sad_pmon_mapping[] = {
        ICX_CBDMA_DMI_PMON_ID,
        ICX_PCIE1_PMON_ID,
        ICX_PCIE2_PMON_ID,
        ICX_PCIE3_PMON_ID,
        ICX_PCIE4_PMON_ID,
        ICX_PCIE5_PMON_ID,
};

static int icx_iio_get_topology(struct intel_uncore_type *type)
{
        return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
}

static int icx_iio_set_mapping(struct intel_uncore_type *type)
{
        return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}

static struct intel_uncore_type icx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = ICX_IIO_MSR_PMON_CTL0,
        .perf_ctr               = ICX_IIO_MSR_PMON_CTR0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
        .box_ctl                = ICX_IIO_MSR_PMON_BOX_CTL,
        .msr_offsets            = icx_msr_offsets,
        .constraints            = icx_uncore_iio_constraints,
        .ops                    = &skx_uncore_iio_ops,
        .format_group           = &snr_uncore_iio_format_group,
        .attr_update            = icx_iio_attr_update,
        .get_topology           = icx_iio_get_topology,
        .set_mapping            = icx_iio_set_mapping,
        .cleanup_mapping        = skx_iio_cleanup_mapping,
};

static struct intel_uncore_type icx_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = ICX_IRP0_MSR_PMON_CTL0,
        .perf_ctr               = ICX_IRP0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = ICX_IRP0_MSR_PMON_BOX_CTL,
        .msr_offsets            = icx_msr_offsets,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
        .name           = "m2pcie",
        .num_counters   = 4,
        .num_boxes      = 6,
        .perf_ctr_bits  = 48,
        .event_ctl      = ICX_M2PCIE_MSR_PMON_CTL0,
        .perf_ctr       = ICX_M2PCIE_MSR_PMON_CTR0,
        .box_ctl        = ICX_M2PCIE_MSR_PMON_BOX_CTL,
        .msr_offsets    = icx_msr_offsets,
        .constraints    = icx_uncore_m2pcie_constraints,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .ops            = &ivbep_uncore_msr_ops,
        .format_group   = &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
        ICX_IIO_MSR_IOCLK,
        ICX_IIO_MSR_BW_IN,

        ICX_IIO_FREERUNNING_TYPE_MAX,
};

static unsigned icx_iio_clk_freerunning_box_offsets[] = {
        0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
        0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

static struct freerunning_counters icx_iio_freerunning[] = {
        [ICX_IIO_MSR_IOCLK]     = { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
        [ICX_IIO_MSR_BW_IN]     = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
        /* Free-Running IIO CLOCKS Counter */
        INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
        /* Free-Running IIO BANDWIDTH IN Counters */
        INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4,            "event=0xff,umask=0x24"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5,            "event=0xff,umask=0x25"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6,            "event=0xff,umask=0x26"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7,            "event=0xff,umask=0x27"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,       "MiB"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
        .name                   = "iio_free_running",
        .num_counters           = 9,
        .num_boxes              = 6,
        .num_freerunning_types  = ICX_IIO_FREERUNNING_TYPE_MAX,
        .freerunning            = icx_iio_freerunning,
        .ops                    = &skx_uncore_iio_freerunning_ops,
        .event_descs            = icx_uncore_iio_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
        &skx_uncore_ubox,
        &icx_uncore_chabox,
        &icx_uncore_iio,
        &icx_uncore_irp,
        &icx_uncore_m2pcie,
        &skx_uncore_pcu,
        &icx_uncore_iio_free_running,
        NULL,
};

5245 /*
5246  * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
5247  * registers which located at Device 30, Function 3
5248  */
5249 #define ICX_CAPID6              0x9c
5250 #define ICX_CAPID7              0xa0
5251
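/*
 * Build the 64-bit CHA-enable mask from the two 32-bit CAPID registers
 * and count the set bits: each set bit corresponds to one enabled CHA.
 */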
static u64 icx_count_chabox(void)
{
        struct pci_dev *dev = NULL;
        u64 caps = 0;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
        if (!dev)
                goto out;

        pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
        pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
        pci_dev_put(dev);
        return hweight64(caps);
}

void icx_uncore_cpu_init(void)
{
        u64 num_boxes = icx_count_chabox();

        if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
                return;
        icx_uncore_chabox.num_boxes = num_boxes;
        uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
        .name           = "m2m",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SNR_M2M_PCI_PMON_CTR0,
        .event_ctl      = SNR_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNR_M2M_PCI_PMON_BOX_CTL,
        .ops            = &snr_m2m_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext4.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
        .name = "format",
        .attrs = icx_upi_uncore_formats_attr,
};

static struct intel_uncore_type icx_uncore_upi = {
        .name           = "upi",
        .num_counters   = 4,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .perf_ctr       = ICX_UPI_PCI_PMON_CTR0,
        .event_ctl      = ICX_UPI_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
        .box_ctl        = ICX_UPI_PCI_PMON_BOX_CTL,
        .ops            = &skx_upi_uncore_pci_ops,
        .format_group   = &icx_upi_uncore_format_group,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
        .name           = "m3upi",
        .num_counters   = 4,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .perf_ctr       = ICX_M3UPI_PCI_PMON_CTR0,
        .event_ctl      = ICX_M3UPI_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = ICX_M3UPI_PCI_PMON_BOX_CTL,
        .constraints    = icx_uncore_m3upi_constraints,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};

enum {
        ICX_PCI_UNCORE_M2M,
        ICX_PCI_UNCORE_UPI,
        ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
        [ICX_PCI_UNCORE_M2M]            = &icx_uncore_m2m,
        [ICX_PCI_UNCORE_UPI]            = &icx_uncore_upi,
        [ICX_PCI_UNCORE_M3UPI]          = &icx_uncore_m3upi,
        NULL,
};

static const struct pci_device_id icx_uncore_pci_ids[] = {
        { /* M2M 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
        },
        { /* M2M 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
        },
        { /* M2M 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
        },
        { /* M2M 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
        },
        { /* UPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
        },
        { /* UPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
        },
        { /* UPI Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
        },
        { /* M3UPI Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
        },
        { /* M3UPI Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
        },
        { /* M3UPI Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
        },
        { /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
        .name           = "icx_uncore",
        .id_table       = icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
        /* ICX UBOX DID */
        int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
                                         SKX_GIDNIDMAP, true);

        if (ret)
                return ret;

        uncore_pci_uncores = icx_pci_uncores;
        uncore_pci_driver = &icx_uncore_pci_driver;
        return 0;
}

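/*
 * Each ICX IMC exposes ICX_NUMBER_IMC_CHN channels.  The pmu index
 * modulo the channel count picks the per-channel register offset, and
 * the pmu index divided by it picks which IMC's MMIO region (spaced
 * ICX_IMC_MEM_STRIDE apart) to map.
 */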
static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
        unsigned int box_ctl = box->pmu->type->box_ctl +
                               box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
        int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;

        __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
                                   SNR_MC_DEVICE_ID);
}

static struct intel_uncore_ops icx_uncore_mmio_ops = {
        .init_box       = icx_uncore_imc_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .disable_box    = snr_uncore_mmio_disable_box,
        .enable_box     = snr_uncore_mmio_enable_box,
        .disable_event  = snr_uncore_mmio_disable_event,
        .enable_event   = snr_uncore_mmio_enable_event,
        .read_counter   = uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNR_IMC_MMIO_PMON_FIXED_CTR,
        .fixed_ctl      = SNR_IMC_MMIO_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        .perf_ctr       = SNR_IMC_MMIO_PMON_CTR0,
        .event_ctl      = SNR_IMC_MMIO_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNR_IMC_MMIO_PMON_BOX_CTL,
        .mmio_offset    = SNR_IMC_MMIO_OFFSET,
        .mmio_map_size  = SNR_IMC_MMIO_SIZE,
        .ops            = &icx_uncore_mmio_ops,
        .format_group   = &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
        ICX_IMC_DCLK,
        ICX_IMC_DDR,
        ICX_IMC_DDRT,

        ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
        [ICX_IMC_DCLK]  = { 0x22b0, 0x0, 0, 1, 48 },
        [ICX_IMC_DDR]   = { 0x2290, 0x8, 0, 2, 48 },
        [ICX_IMC_DDRT]  = { 0x22a0, 0x8, 0, 2, 48 },
};

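/*
 * Scale 6.103515625e-5 = 64 / 2^20: each bandwidth-counter increment
 * represents one 64-byte cache line, so the scale converts raw counts
 * to MiB.
 */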
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
        INTEL_UNCORE_EVENT_DESC(dclk,                   "event=0xff,umask=0x10"),

        INTEL_UNCORE_EVENT_DESC(read,                   "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(read.scale,             "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(read.unit,              "MiB"),
        INTEL_UNCORE_EVENT_DESC(write,                  "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(write.scale,            "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(write.unit,             "MiB"),

        INTEL_UNCORE_EVENT_DESC(ddrt_read,              "event=0xff,umask=0x30"),
        INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,        "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,         "MiB"),
        INTEL_UNCORE_EVENT_DESC(ddrt_write,             "event=0xff,umask=0x31"),
        INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,       "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,        "MiB"),
        { /* end: all zeroes */ },
};

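/*
 * The free-running counters are per-IMC rather than per-channel, so
 * the pmu index selects the IMC's MMIO region directly (contrast with
 * icx_uncore_imc_init_box() above).
 */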
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
        int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
                         SNR_IMC_MMIO_MEM0_OFFSET;

        snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
                            mem_offset, SNR_MC_DEVICE_ID);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
        .init_box       = icx_uncore_imc_freerunning_init_box,
        .exit_box       = uncore_mmio_exit_box,
        .read_counter   = uncore_mmio_read_counter,
        .hw_config      = uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
        .name                   = "imc_free_running",
        .num_counters           = 5,
        .num_boxes              = 4,
        .num_freerunning_types  = ICX_IMC_FREERUNNING_TYPE_MAX,
        .mmio_map_size          = SNR_IMC_MMIO_SIZE,
        .freerunning            = icx_imc_freerunning,
        .ops                    = &icx_uncore_imc_freerunning_ops,
        .event_descs            = icx_uncore_imc_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
        &icx_uncore_imc,
        &icx_uncore_imc_free_running,
        NULL,
};

void icx_uncore_mmio_init(void)
{
        uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */

/* SPR uncore support */

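/*
 * Program the event's extra (filter) register, if any, before writing
 * the event control MSR, so the filter is in place when the counter is
 * enabled.
 */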
static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, reg1->config);

        wrmsrl(hwc->config_base, hwc->config);
}

static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
                                         struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE)
                wrmsrl(reg1->reg, 0);

        wrmsrl(hwc->config_base, 0);
}

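/*
 * If the event requests TID filtering, point its extra register at
 * this CHA's FILTER0 MSR and load it with the TID value from config1.
 */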
static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        bool tid_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
        struct intel_uncore_type *type = box->pmu->type;

        if (tid_en) {
                reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
                            HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
                reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
                reg1->idx = 0;
        }

        return 0;
}

static struct intel_uncore_ops spr_uncore_chabox_ops = {
        .init_box               = intel_generic_uncore_msr_init_box,
        .disable_box            = intel_generic_uncore_msr_disable_box,
        .enable_box             = intel_generic_uncore_msr_enable_box,
        .disable_event          = spr_uncore_msr_disable_event,
        .enable_event           = spr_uncore_msr_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = spr_cha_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};

static struct attribute *spr_uncore_cha_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext4.attr,
        &format_attr_tid_en2.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid5.attr,
        NULL,
};

static const struct attribute_group spr_uncore_chabox_format_group = {
        .name = "format",
        .attrs = spr_uncore_cha_formats_attr,
};

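/*
 * sysfs "alias" attribute: report the PMU's alternate name (assumed to
 * be the generic "uncore_type_<id>_<box>" form produced by
 * uncore_get_alias_name()) so it can be found under either name.
 */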
static ssize_t alias_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
        char pmu_name[UNCORE_PMU_NAME_LEN];

        uncore_get_alias_name(pmu_name, pmu);
        return sysfs_emit(buf, "%s\n", pmu_name);
}

static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
        &dev_attr_alias.attr,
        NULL
};

ATTRIBUTE_GROUPS(uncore_alias);

static struct intel_uncore_type spr_uncore_chabox = {
        .name                   = "cha",
        .event_mask             = SPR_CHA_PMON_EVENT_MASK,
        .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,
        .num_shared_regs        = 1,
        .ops                    = &spr_uncore_chabox_ops,
        .format_group           = &spr_uncore_chabox_format_group,
        .attr_update            = uncore_alias_groups,
};

static struct intel_uncore_type spr_uncore_iio = {
        .name                   = "iio",
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
        .format_group           = &snr_uncore_iio_format_group,
        .attr_update            = uncore_alias_groups,
};

static struct attribute *spr_uncore_raw_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext4.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
        .name                   = "format",
        .attrs                  = spr_uncore_raw_formats_attr,
};

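/*
 * Common event-mask and format fields shared by most SPR uncore types;
 * each type then supplies its own name and, where needed, its own ops.
 */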
#define SPR_UNCORE_COMMON_FORMAT()                              \
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,    \
        .event_mask_ext         = SPR_RAW_EVENT_MASK_EXT,       \
        .format_group           = &spr_uncore_raw_format_group, \
        .attr_update            = uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
        SPR_UNCORE_COMMON_FORMAT(),
        .name                   = "irp",
};

static struct intel_uncore_type spr_uncore_m2pcie = {
        SPR_UNCORE_COMMON_FORMAT(),
        .name                   = "m2pcie",
};

static struct intel_uncore_type spr_uncore_pcu = {
        .name                   = "pcu",
        .attr_update            = uncore_alias_groups,
};

static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
                                         struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!box->io_addr)
                return;

        if (uncore_pmc_fixed(hwc->idx))
                writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
        else
                writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops spr_uncore_mmio_ops = {
        .init_box               = intel_generic_uncore_mmio_init_box,
        .exit_box               = uncore_mmio_exit_box,
        .disable_box            = intel_generic_uncore_mmio_disable_box,
        .enable_box             = intel_generic_uncore_mmio_enable_box,
        .disable_event          = intel_generic_uncore_mmio_disable_event,
        .enable_event           = spr_uncore_mmio_enable_event,
        .read_counter           = uncore_mmio_read_counter,
};

static struct intel_uncore_type spr_uncore_imc = {
        SPR_UNCORE_COMMON_FORMAT(),
        .name                   = "imc",
        .fixed_ctr_bits         = 48,
        .fixed_ctr              = SNR_IMC_MMIO_PMON_FIXED_CTR,
        .fixed_ctl              = SNR_IMC_MMIO_PMON_FIXED_CTL,
        .ops                    = &spr_uncore_mmio_ops,
};

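/*
 * SPR PCI PMON control registers are 64 bits wide: write the upper
 * dword first and the lower dword, which holds the enable bit, last.
 */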
static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;

        pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
        pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
}

static struct intel_uncore_ops spr_uncore_pci_ops = {
        .init_box               = intel_generic_uncore_pci_init_box,
        .disable_box            = intel_generic_uncore_pci_disable_box,
        .enable_box             = intel_generic_uncore_pci_enable_box,
        .disable_event          = intel_generic_uncore_pci_disable_event,
        .enable_event           = spr_uncore_pci_enable_event,
        .read_counter           = intel_generic_uncore_pci_read_counter,
};

#define SPR_UNCORE_PCI_COMMON_FORMAT()                  \
        SPR_UNCORE_COMMON_FORMAT(),                     \
        .ops                    = &spr_uncore_pci_ops

static struct intel_uncore_type spr_uncore_m2m = {
        SPR_UNCORE_PCI_COMMON_FORMAT(),
        .name                   = "m2m",
};

static struct intel_uncore_type spr_uncore_upi = {
        SPR_UNCORE_PCI_COMMON_FORMAT(),
        .name                   = "upi",
};

static struct intel_uncore_type spr_uncore_m3upi = {
        SPR_UNCORE_PCI_COMMON_FORMAT(),
        .name                   = "m3upi",
};

static struct intel_uncore_type spr_uncore_mdf = {
        SPR_UNCORE_COMMON_FORMAT(),
        .name                   = "mdf",
};

#define UNCORE_SPR_NUM_UNCORE_TYPES             12
#define UNCORE_SPR_IIO                          1

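/*
 * Indexed by the discovery type id.  NULL slots are types for which the
 * generic discovered settings are used as-is, with no SPR-specific
 * customization.
 */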
static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
        &spr_uncore_chabox,
        &spr_uncore_iio,
        &spr_uncore_irp,
        &spr_uncore_m2pcie,
        &spr_uncore_pcu,
        NULL,
        &spr_uncore_imc,
        &spr_uncore_m2m,
        &spr_uncore_upi,
        &spr_uncore_m3upi,
        NULL,
        &spr_uncore_mdf,
};

enum perf_uncore_spr_iio_freerunning_type_id {
        SPR_IIO_MSR_IOCLK,
        SPR_IIO_MSR_BW_IN,
        SPR_IIO_MSR_BW_OUT,

        SPR_IIO_FREERUNNING_TYPE_MAX,
};

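/*
 * SPR IIO free-running counters use a uniform 0x10 per-box stride, so
 * no per-box offset table is needed (unlike ICX above).
 */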
static struct freerunning_counters spr_iio_freerunning[] = {
        [SPR_IIO_MSR_IOCLK]     = { 0x340e, 0x1, 0x10, 1, 48 },
        [SPR_IIO_MSR_BW_IN]     = { 0x3800, 0x1, 0x10, 8, 48 },
        [SPR_IIO_MSR_BW_OUT]    = { 0x3808, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
        /* Free-Running IIO CLOCKS Counter */
        INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
        /* Free-Running IIO BANDWIDTH IN Counters */
        INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4,            "event=0xff,umask=0x24"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5,            "event=0xff,umask=0x25"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6,            "event=0xff,umask=0x26"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7,            "event=0xff,umask=0x27"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,       "MiB"),
        /* Free-Running IIO BANDWIDTH OUT Counters */
        INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x30"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x31"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x32"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x33"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port4,           "event=0xff,umask=0x34"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port5,           "event=0xff,umask=0x35"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port6,           "event=0xff,umask=0x36"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port7,           "event=0xff,umask=0x37"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,      "MiB"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_type spr_uncore_iio_free_running = {
        .name                   = "iio_free_running",
        .num_counters           = 17,
        .num_freerunning_types  = SPR_IIO_FREERUNNING_TYPE_MAX,
        .freerunning            = spr_iio_freerunning,
        .ops                    = &skx_uncore_iio_freerunning_ops,
        .event_descs            = spr_uncore_iio_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES            1

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
        &spr_uncore_iio_free_running,
};

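/*
 * Overlay an SPR-specific template onto a type produced by the generic
 * discovery code: only the fields the template actually sets are
 * copied over.
 */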
static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
                                        struct intel_uncore_type *from_type)
{
        if (!to_type || !from_type)
                return;

        if (from_type->name)
                to_type->name = from_type->name;
        if (from_type->fixed_ctr_bits)
                to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
        if (from_type->event_mask)
                to_type->event_mask = from_type->event_mask;
        if (from_type->event_mask_ext)
                to_type->event_mask_ext = from_type->event_mask_ext;
        if (from_type->fixed_ctr)
                to_type->fixed_ctr = from_type->fixed_ctr;
        if (from_type->fixed_ctl)
                to_type->fixed_ctl = from_type->fixed_ctl;
        if (from_type->num_shared_regs)
                to_type->num_shared_regs = from_type->num_shared_regs;
        if (from_type->constraints)
                to_type->constraints = from_type->constraints;
        if (from_type->ops)
                to_type->ops = from_type->ops;
        if (from_type->event_descs)
                to_type->event_descs = from_type->event_descs;
        if (from_type->format_group)
                to_type->format_group = from_type->format_group;
        if (from_type->attr_update)
                to_type->attr_update = from_type->attr_update;
}

static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
                   struct intel_uncore_type **extra)
{
        struct intel_uncore_type **types, **start_types;
        int i;

        start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

        /* Only copy the customized features */
        for (; *types; types++) {
                if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
                        continue;
                uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
        }

        for (i = 0; i < num_extra; i++, types++)
                *types = extra[i];

        return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
        for (; *types; types++) {
                if (type_id == (*types)->type_id)
                        return *types;
        }

        return NULL;
}

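/*
 * Box ids assigned by discovery need not be contiguous, so size
 * per-box data by the largest id + 1 rather than by num_boxes.
 */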
static int uncore_type_max_boxes(struct intel_uncore_type **types,
                                 int type_id)
{
        struct intel_uncore_type *type;
        int i, max = 0;

        type = uncore_find_type_by_id(types, type_id);
        if (!type)
                return 0;

        for (i = 0; i < type->num_boxes; i++) {
                if (type->box_ids[i] > max)
                        max = type->box_ids[i];
        }

        return max + 1;
}

void spr_uncore_cpu_init(void)
{
        uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
                                                UNCORE_SPR_MSR_EXTRA_UNCORES,
                                                spr_msr_uncores);

        spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

int spr_uncore_pci_init(void)
{
        uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
        return 0;
}

void spr_uncore_mmio_init(void)
{
        uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
}

/* end of SPR uncore support */